1 //===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
15 /// byte of the application memory, poison the shadow of the malloc-ed
16 /// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
19 /// write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
21 ///
/// But there are differences too. The first and major one is that we use
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But it also brings a major issue:
/// msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
35 /// path storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
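/// For example, when a call is instrumented, the caller writes the shadow of
/// each argument into __msan_param_tls before the call and reads the shadow
/// of the return value back from __msan_retval_tls afterwards.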
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
56 ///
/// Every aligned group of 4 consecutive bytes of application memory has one
/// origin value associated with it. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
67 ///
68 ///                            Atomic handling.
69 ///
70 /// Ideally, every atomic store of application value should update the
71 /// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations cannot be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. Current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
91 ///
92 ///                      Instrumenting inline assembly.
93 ///
94 /// For inline assembly code LLVM has little idea about which memory locations
/// become initialized depending on the arguments. It may be possible to figure
/// out which arguments are meant to point to inputs and outputs, but the
/// actual semantics are only visible at runtime. In the Linux kernel it's
/// also possible that the arguments only indicate the offset for a base taken
/// from a segment register, so it's dangerous to treat any asm() arguments as
/// pointers. We take a conservative approach generating calls to
///   __msan_instrument_asm_store(ptr, size)
/// which defers the memory unpoisoning to the runtime library.
103 /// The latter can perform more complex address checks to figure out whether
104 /// it's safe to touch the shadow memory.
105 /// Like with atomic operations, we call __msan_instrument_asm_store() before
106 /// the assembly call, so that changes to the shadow memory will be seen by
107 /// other threads together with main memory initialization.
108 ///
109 ///                  KernelMemorySanitizer (KMSAN) implementation.
110 ///
111 /// The major differences between KMSAN and MSan instrumentation are:
112 ///  - KMSAN always tracks the origins and implies msan-keep-going=true;
113 ///  - KMSAN allocates shadow and origin memory for each page separately, so
114 ///    there are no explicit accesses to shadow and origin in the
115 ///    instrumentation.
116 ///    Shadow and origin values for a particular X-byte memory location
117 ///    (X=1,2,4,8) are accessed through pointers obtained via the
118 ///      __msan_metadata_ptr_for_load_X(ptr)
119 ///      __msan_metadata_ptr_for_store_X(ptr)
///    functions. These check that the X-byte accesses are possible and
///    return the pointers to shadow and origin memory.
122 ///    Arbitrary sized accesses are handled with:
123 ///      __msan_metadata_ptr_for_load_n(ptr, size)
124 ///      __msan_metadata_ptr_for_store_n(ptr, size);
125 ///  - TLS variables are stored in a single per-task struct. A call to a
126 ///    function __msan_get_context_state() returning a pointer to that struct
127 ///    is inserted into every instrumented function before the entry block;
128 ///  - __msan_warning() takes a 32-bit origin parameter;
129 ///  - local variables are poisoned with __msan_poison_alloca() upon function
130 ///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
131 ///    function;
132 ///  - the pass doesn't declare any global variables or add global constructors
133 ///    to the translation unit.
134 ///
135 /// Also, KMSAN currently ignores uninitialized memory passed into inline asm
136 /// calls, making sure we're on the safe side wrt. possible false positives.
137 ///
138 ///  KernelMemorySanitizer only supports X86_64 at the moment.
139 ///
140 //===----------------------------------------------------------------------===//
141 
142 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
143 #include "llvm/ADT/APInt.h"
144 #include "llvm/ADT/ArrayRef.h"
145 #include "llvm/ADT/DepthFirstIterator.h"
146 #include "llvm/ADT/SmallSet.h"
147 #include "llvm/ADT/SmallString.h"
148 #include "llvm/ADT/SmallVector.h"
149 #include "llvm/ADT/StringExtras.h"
150 #include "llvm/ADT/StringRef.h"
151 #include "llvm/ADT/Triple.h"
152 #include "llvm/Analysis/TargetLibraryInfo.h"
153 #include "llvm/IR/Argument.h"
154 #include "llvm/IR/Attributes.h"
155 #include "llvm/IR/BasicBlock.h"
156 #include "llvm/IR/CallingConv.h"
157 #include "llvm/IR/Constant.h"
158 #include "llvm/IR/Constants.h"
159 #include "llvm/IR/DataLayout.h"
160 #include "llvm/IR/DerivedTypes.h"
161 #include "llvm/IR/Function.h"
162 #include "llvm/IR/GlobalValue.h"
163 #include "llvm/IR/GlobalVariable.h"
164 #include "llvm/IR/IRBuilder.h"
165 #include "llvm/IR/InlineAsm.h"
166 #include "llvm/IR/InstVisitor.h"
167 #include "llvm/IR/InstrTypes.h"
168 #include "llvm/IR/Instruction.h"
169 #include "llvm/IR/Instructions.h"
170 #include "llvm/IR/IntrinsicInst.h"
171 #include "llvm/IR/Intrinsics.h"
172 #include "llvm/IR/IntrinsicsX86.h"
173 #include "llvm/IR/LLVMContext.h"
174 #include "llvm/IR/MDBuilder.h"
175 #include "llvm/IR/Module.h"
176 #include "llvm/IR/Type.h"
177 #include "llvm/IR/Value.h"
178 #include "llvm/IR/ValueMap.h"
179 #include "llvm/InitializePasses.h"
180 #include "llvm/Pass.h"
181 #include "llvm/Support/AtomicOrdering.h"
182 #include "llvm/Support/Casting.h"
183 #include "llvm/Support/CommandLine.h"
184 #include "llvm/Support/Compiler.h"
185 #include "llvm/Support/Debug.h"
186 #include "llvm/Support/ErrorHandling.h"
187 #include "llvm/Support/MathExtras.h"
188 #include "llvm/Support/raw_ostream.h"
189 #include "llvm/Transforms/Instrumentation.h"
190 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
191 #include "llvm/Transforms/Utils/Local.h"
192 #include "llvm/Transforms/Utils/ModuleUtils.h"
193 #include <algorithm>
194 #include <cassert>
195 #include <cstddef>
196 #include <cstdint>
197 #include <memory>
198 #include <string>
199 #include <tuple>
200 
201 using namespace llvm;
202 
203 #define DEBUG_TYPE "msan"
204 
205 static const unsigned kOriginSize = 4;
206 static const Align kMinOriginAlignment = Align(4);
207 static const Align kShadowTLSAlignment = Align(8);
208 
209 // These constants must be kept in sync with the ones in msan.h.
210 static const unsigned kParamTLSSize = 800;
211 static const unsigned kRetvalTLSSize = 800;
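// Each is 800 bytes, laid out as 100 64-bit slots by createKernelApi() and
// createUserspaceApi() below.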
212 
// Access sizes are powers of two: 1, 2, 4, 8.
214 static const size_t kNumberOfAccessSizes = 4;
215 
216 /// Track origins of uninitialized values.
217 ///
218 /// Adds a section to MemorySanitizer report that points to the allocation
219 /// (stack or heap) the uninitialized bits came from originally.
220 static cl::opt<int> ClTrackOrigins("msan-track-origins",
221        cl::desc("Track origins (allocation sites) of poisoned memory"),
222        cl::Hidden, cl::init(0));
223 
224 static cl::opt<bool> ClKeepGoing("msan-keep-going",
225        cl::desc("keep going after reporting a UMR"),
226        cl::Hidden, cl::init(false));
227 
228 static cl::opt<bool> ClPoisonStack("msan-poison-stack",
229        cl::desc("poison uninitialized stack variables"),
230        cl::Hidden, cl::init(true));
231 
232 static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
233        cl::desc("poison uninitialized stack variables with a call"),
234        cl::Hidden, cl::init(false));
235 
236 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
237        cl::desc("poison uninitialized stack variables with the given pattern"),
238        cl::Hidden, cl::init(0xff));
239 
240 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
241        cl::desc("poison undef temps"),
242        cl::Hidden, cl::init(true));
243 
244 static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
245        cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
246        cl::Hidden, cl::init(true));
247 
248 static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
249        cl::desc("exact handling of relational integer ICmp"),
250        cl::Hidden, cl::init(false));
251 
252 static cl::opt<bool> ClHandleLifetimeIntrinsics(
253     "msan-handle-lifetime-intrinsics",
254     cl::desc(
255         "when possible, poison scoped variables at the beginning of the scope "
256         "(slower, but more precise)"),
257     cl::Hidden, cl::init(true));
258 
259 // When compiling the Linux kernel, we sometimes see false positives related to
260 // MSan being unable to understand that inline assembly calls may initialize
261 // local variables.
// This flag makes the compiler conservatively unpoison every memory location
// passed into an assembly call. Note that this may cause false negatives.
264 // Because it's impossible to figure out the array sizes, we can only unpoison
265 // the first sizeof(type) bytes for each type* pointer.
266 // The instrumentation is only enabled in KMSAN builds, and only if
267 // -msan-handle-asm-conservative is on. This is done because we may want to
268 // quickly disable assembly instrumentation when it breaks.
269 static cl::opt<bool> ClHandleAsmConservative(
270     "msan-handle-asm-conservative",
271     cl::desc("conservative handling of inline assembly"), cl::Hidden,
272     cl::init(true));
273 
// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since a load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only the lower bits of the address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
280 static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
281        cl::desc("report accesses through a pointer which has poisoned shadow"),
282        cl::Hidden, cl::init(true));
283 
284 static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
285        cl::desc("print out instructions with default strict semantics"),
286        cl::Hidden, cl::init(false));
287 
288 static cl::opt<int> ClInstrumentationWithCallThreshold(
289     "msan-instrumentation-with-call-threshold",
290     cl::desc(
291         "If the function being instrumented requires more than "
292         "this number of checks and origin stores, use callbacks instead of "
293         "inline checks (-1 means never use callbacks)."),
294     cl::Hidden, cl::init(3500));
295 
296 static cl::opt<bool>
297     ClEnableKmsan("msan-kernel",
298                   cl::desc("Enable KernelMemorySanitizer instrumentation"),
299                   cl::Hidden, cl::init(false));
300 
// This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some inexplicable reason such cases were silently
// ignored by the instrumentation.
304 static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
305        cl::desc("Insert checks for constant shadow values"),
306        cl::Hidden, cl::init(false));
307 
308 // This is off by default because of a bug in gold:
309 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
310 static cl::opt<bool> ClWithComdat("msan-with-comdat",
311        cl::desc("Place MSan constructors in comdat sections"),
312        cl::Hidden, cl::init(false));
313 
// These options allow the user to specify custom memory map parameters.
// See MemoryMapParams for details.
316 static cl::opt<uint64_t> ClAndMask("msan-and-mask",
317                                    cl::desc("Define custom MSan AndMask"),
318                                    cl::Hidden, cl::init(0));
319 
320 static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
321                                    cl::desc("Define custom MSan XorMask"),
322                                    cl::Hidden, cl::init(0));
323 
324 static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
325                                       cl::desc("Define custom MSan ShadowBase"),
326                                       cl::Hidden, cl::init(0));
327 
328 static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
329                                       cl::desc("Define custom MSan OriginBase"),
330                                       cl::Hidden, cl::init(0));
331 
332 static const char *const kMsanModuleCtorName = "msan.module_ctor";
333 static const char *const kMsanInitName = "__msan_init";
334 
335 namespace {
336 
337 // Memory map parameters used in application-to-shadow address calculation.
338 // Offset = (Addr & ~AndMask) ^ XorMask
339 // Shadow = ShadowBase + Offset
340 // Origin = OriginBase + Offset
341 struct MemoryMapParams {
342   uint64_t AndMask;
343   uint64_t XorMask;
344   uint64_t ShadowBase;
345   uint64_t OriginBase;
346 };
347 
348 struct PlatformMemoryMapParams {
349   const MemoryMapParams *bits32;
350   const MemoryMapParams *bits64;
351 };
352 
353 } // end anonymous namespace
354 
355 // i386 Linux
356 static const MemoryMapParams Linux_I386_MemoryMapParams = {
357   0x000080000000,  // AndMask
358   0,               // XorMask (not used)
359   0,               // ShadowBase (not used)
360   0x000040000000,  // OriginBase
361 };
362 
363 // x86_64 Linux
364 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
365 #ifdef MSAN_LINUX_X86_64_OLD_MAPPING
366   0x400000000000,  // AndMask
367   0,               // XorMask (not used)
368   0,               // ShadowBase (not used)
369   0x200000000000,  // OriginBase
370 #else
371   0,               // AndMask (not used)
372   0x500000000000,  // XorMask
373   0,               // ShadowBase (not used)
374   0x100000000000,  // OriginBase
375 #endif
376 };
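// For illustration, with the default (non-OLD_MAPPING) x86_64 Linux values
// above, AndMask and ShadowBase are unused, so the mapping reduces to
//   Shadow(addr) = addr ^ 0x500000000000
//   Origin(addr) = (addr ^ 0x500000000000) + 0x100000000000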
377 
378 // mips64 Linux
379 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
380   0,               // AndMask (not used)
381   0x008000000000,  // XorMask
382   0,               // ShadowBase (not used)
383   0x002000000000,  // OriginBase
384 };
385 
386 // ppc64 Linux
387 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
388   0xE00000000000,  // AndMask
389   0x100000000000,  // XorMask
390   0x080000000000,  // ShadowBase
391   0x1C0000000000,  // OriginBase
392 };
393 
394 // s390x Linux
395 static const MemoryMapParams Linux_S390X_MemoryMapParams = {
396     0xC00000000000, // AndMask
397     0,              // XorMask (not used)
398     0x080000000000, // ShadowBase
399     0x1C0000000000, // OriginBase
400 };
401 
402 // aarch64 Linux
403 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
404   0,               // AndMask (not used)
405   0x06000000000,   // XorMask
406   0,               // ShadowBase (not used)
407   0x01000000000,   // OriginBase
408 };
409 
410 // i386 FreeBSD
411 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
412   0x000180000000,  // AndMask
413   0x000040000000,  // XorMask
414   0x000020000000,  // ShadowBase
415   0x000700000000,  // OriginBase
416 };
417 
418 // x86_64 FreeBSD
419 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
420   0xc00000000000,  // AndMask
421   0x200000000000,  // XorMask
422   0x100000000000,  // ShadowBase
423   0x380000000000,  // OriginBase
424 };
425 
426 // x86_64 NetBSD
427 static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
428   0,               // AndMask
429   0x500000000000,  // XorMask
430   0,               // ShadowBase
431   0x100000000000,  // OriginBase
432 };
433 
434 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
435   &Linux_I386_MemoryMapParams,
436   &Linux_X86_64_MemoryMapParams,
437 };
438 
439 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
440   nullptr,
441   &Linux_MIPS64_MemoryMapParams,
442 };
443 
444 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
445   nullptr,
446   &Linux_PowerPC64_MemoryMapParams,
447 };
448 
449 static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
450     nullptr,
451     &Linux_S390X_MemoryMapParams,
452 };
453 
454 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
455   nullptr,
456   &Linux_AArch64_MemoryMapParams,
457 };
458 
459 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
460   &FreeBSD_I386_MemoryMapParams,
461   &FreeBSD_X86_64_MemoryMapParams,
462 };
463 
464 static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
465   nullptr,
466   &NetBSD_X86_64_MemoryMapParams,
467 };
468 
469 namespace {
470 
471 /// Instrument functions of a module to detect uninitialized reads.
472 ///
473 /// Instantiating MemorySanitizer inserts the msan runtime library API function
474 /// declarations into the module if they don't exist already. Instantiating
475 /// ensures the __msan_init function is in the list of global constructors for
476 /// the module.
477 class MemorySanitizer {
478 public:
479   MemorySanitizer(Module &M, MemorySanitizerOptions Options)
480       : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
481         Recover(Options.Recover) {
482     initializeModule(M);
483   }
484 
485   // MSan cannot be moved or copied because of MapParams.
486   MemorySanitizer(MemorySanitizer &&) = delete;
487   MemorySanitizer &operator=(MemorySanitizer &&) = delete;
488   MemorySanitizer(const MemorySanitizer &) = delete;
489   MemorySanitizer &operator=(const MemorySanitizer &) = delete;
490 
491   bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
492 
493 private:
494   friend struct MemorySanitizerVisitor;
495   friend struct VarArgAMD64Helper;
496   friend struct VarArgMIPS64Helper;
497   friend struct VarArgAArch64Helper;
498   friend struct VarArgPowerPC64Helper;
499   friend struct VarArgSystemZHelper;
500 
501   void initializeModule(Module &M);
502   void initializeCallbacks(Module &M);
503   void createKernelApi(Module &M);
504   void createUserspaceApi(Module &M);
505 
506   /// True if we're compiling the Linux kernel.
507   bool CompileKernel;
508   /// Track origins (allocation points) of uninitialized values.
509   int TrackOrigins;
510   bool Recover;
511 
512   LLVMContext *C;
513   Type *IntptrTy;
514   Type *OriginTy;
515 
516   // XxxTLS variables represent the per-thread state in MSan and per-task state
517   // in KMSAN.
518   // For the userspace these point to thread-local globals. In the kernel land
519   // they point to the members of a per-task struct obtained via a call to
520   // __msan_get_context_state().
521 
522   /// Thread-local shadow storage for function parameters.
523   Value *ParamTLS;
524 
525   /// Thread-local origin storage for function parameters.
526   Value *ParamOriginTLS;
527 
528   /// Thread-local shadow storage for function return value.
529   Value *RetvalTLS;
530 
531   /// Thread-local origin storage for function return value.
532   Value *RetvalOriginTLS;
533 
534   /// Thread-local shadow storage for in-register va_arg function
535   /// parameters (x86_64-specific).
536   Value *VAArgTLS;
537 
538   /// Thread-local shadow storage for in-register va_arg function
539   /// parameters (x86_64-specific).
540   Value *VAArgOriginTLS;
541 
542   /// Thread-local shadow storage for va_arg overflow area
543   /// (x86_64-specific).
544   Value *VAArgOverflowSizeTLS;
545 
546   /// Thread-local space used to pass origin value to the UMR reporting
547   /// function.
548   Value *OriginTLS;
549 
550   /// Are the instrumentation callbacks set up?
551   bool CallbacksInitialized = false;
552 
553   /// The run-time callback to print a warning.
554   FunctionCallee WarningFn;
555 
556   // These arrays are indexed by log2(AccessSize).
557   FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
558   FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];
559 
560   /// Run-time helper that generates a new origin value for a stack
561   /// allocation.
562   FunctionCallee MsanSetAllocaOrigin4Fn;
563 
564   /// Run-time helper that poisons stack on function entry.
565   FunctionCallee MsanPoisonStackFn;
566 
567   /// Run-time helper that records a store (or any event) of an
568   /// uninitialized value and returns an updated origin id encoding this info.
569   FunctionCallee MsanChainOriginFn;
570 
571   /// MSan runtime replacements for memmove, memcpy and memset.
572   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
573 
574   /// KMSAN callback for task-local function argument shadow.
575   StructType *MsanContextStateTy;
576   FunctionCallee MsanGetContextStateFn;
577 
578   /// Functions for poisoning/unpoisoning local variables
579   FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
580 
581   /// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
582   /// pointers.
583   FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
584   FunctionCallee MsanMetadataPtrForLoad_1_8[4];
585   FunctionCallee MsanMetadataPtrForStore_1_8[4];
586   FunctionCallee MsanInstrumentAsmStoreFn;
587 
588   /// Helper to choose between different MsanMetadataPtrXxx().
589   FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
590 
591   /// Memory map parameters used in application-to-shadow calculation.
592   const MemoryMapParams *MapParams;
593 
  /// Custom memory map parameters used when -msan-shadow-base or
  /// -msan-origin-base is provided.
596   MemoryMapParams CustomMapParams;
597 
598   MDNode *ColdCallWeights;
599 
600   /// Branch weights for origin store.
601   MDNode *OriginStoreWeights;
602 
603   /// An empty volatile inline asm that prevents callback merge.
604   InlineAsm *EmptyAsm;
605 };
606 
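/// Create the msan.module_ctor constructor, which calls __msan_init, and add
/// it to the module's global constructor list (optionally in a comdat).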
607 void insertModuleCtor(Module &M) {
608   getOrCreateSanitizerCtorAndInitFunctions(
609       M, kMsanModuleCtorName, kMsanInitName,
610       /*InitArgTypes=*/{},
611       /*InitArgs=*/{},
612       // This callback is invoked when the functions are created the first
613       // time. Hook them into the global ctors list in that case:
614       [&](Function *Ctor, FunctionCallee) {
615         if (!ClWithComdat) {
616           appendToGlobalCtors(M, Ctor, 0);
617           return;
618         }
619         Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
620         Ctor->setComdat(MsanCtorComdat);
621         appendToGlobalCtors(M, Ctor, 0, Ctor);
622       });
623 }
624 
625 /// A legacy function pass for msan instrumentation.
626 ///
627 /// Instruments functions to detect uninitialized reads.
628 struct MemorySanitizerLegacyPass : public FunctionPass {
629   // Pass identification, replacement for typeid.
630   static char ID;
631 
632   MemorySanitizerLegacyPass(MemorySanitizerOptions Options = {})
633       : FunctionPass(ID), Options(Options) {
634     initializeMemorySanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
635   }
636   StringRef getPassName() const override { return "MemorySanitizerLegacyPass"; }
637 
638   void getAnalysisUsage(AnalysisUsage &AU) const override {
639     AU.addRequired<TargetLibraryInfoWrapperPass>();
640   }
641 
642   bool runOnFunction(Function &F) override {
643     return MSan->sanitizeFunction(
644         F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F));
645   }
646   bool doInitialization(Module &M) override;
647 
648   Optional<MemorySanitizer> MSan;
649   MemorySanitizerOptions Options;
650 };
651 
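/// Return the value of a command-line option if it was explicitly passed on
/// the command line, and the supplied default otherwise.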
652 template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
653   return (Opt.getNumOccurrences() > 0) ? Opt : Default;
654 }
655 
656 } // end anonymous namespace
657 
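// Explicitly passed command-line flags take precedence over the constructor
// arguments; kernel mode defaults to origin tracking level 2 and to
// keep-going (recoverable) reports.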
658 MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K)
659     : Kernel(getOptOrDefault(ClEnableKmsan, K)),
660       TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
661       Recover(getOptOrDefault(ClKeepGoing, Kernel || R)) {}
662 
663 PreservedAnalyses MemorySanitizerPass::run(Function &F,
664                                            FunctionAnalysisManager &FAM) {
665   MemorySanitizer Msan(*F.getParent(), Options);
666   if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
667     return PreservedAnalyses::none();
668   return PreservedAnalyses::all();
669 }
670 
671 PreservedAnalyses MemorySanitizerPass::run(Module &M,
672                                            ModuleAnalysisManager &AM) {
673   if (Options.Kernel)
674     return PreservedAnalyses::all();
675   insertModuleCtor(M);
676   return PreservedAnalyses::none();
677 }
678 
679 char MemorySanitizerLegacyPass::ID = 0;
680 
681 INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
682                       "MemorySanitizer: detects uninitialized reads.", false,
683                       false)
684 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
685 INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
686                     "MemorySanitizer: detects uninitialized reads.", false,
687                     false)
688 
689 FunctionPass *
690 llvm::createMemorySanitizerLegacyPassPass(MemorySanitizerOptions Options) {
691   return new MemorySanitizerLegacyPass(Options);
692 }
693 
694 /// Create a non-const global initialized with the given string.
695 ///
696 /// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
699 static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
700                                                             StringRef Str) {
701   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
702   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
703                             GlobalValue::PrivateLinkage, StrConst, "");
704 }
705 
706 /// Create KMSAN API callbacks.
707 void MemorySanitizer::createKernelApi(Module &M) {
708   IRBuilder<> IRB(*C);
709 
710   // These will be initialized in insertKmsanPrologue().
711   RetvalTLS = nullptr;
712   RetvalOriginTLS = nullptr;
713   ParamTLS = nullptr;
714   ParamOriginTLS = nullptr;
715   VAArgTLS = nullptr;
716   VAArgOriginTLS = nullptr;
717   VAArgOverflowSizeTLS = nullptr;
718   // OriginTLS is unused in the kernel.
719   OriginTLS = nullptr;
720 
721   // __msan_warning() in the kernel takes an origin.
722   WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
723                                     IRB.getInt32Ty());
724   // Requests the per-task context state (kmsan_context_state*) from the
725   // runtime library.
726   MsanContextStateTy = StructType::get(
727       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
728       ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
729       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
730       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
731       IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
732       OriginTy);
733   MsanGetContextStateFn = M.getOrInsertFunction(
734       "__msan_get_context_state", PointerType::get(MsanContextStateTy, 0));
735 
736   Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
737                                 PointerType::get(IRB.getInt32Ty(), 0));
738 
739   for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
740     std::string name_load =
741         "__msan_metadata_ptr_for_load_" + std::to_string(size);
742     std::string name_store =
743         "__msan_metadata_ptr_for_store_" + std::to_string(size);
744     MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
745         name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
746     MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
747         name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
748   }
749 
750   MsanMetadataPtrForLoadN = M.getOrInsertFunction(
751       "__msan_metadata_ptr_for_load_n", RetTy,
752       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
753   MsanMetadataPtrForStoreN = M.getOrInsertFunction(
754       "__msan_metadata_ptr_for_store_n", RetTy,
755       PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
756 
757   // Functions for poisoning and unpoisoning memory.
758   MsanPoisonAllocaFn =
759       M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
760                             IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
761   MsanUnpoisonAllocaFn = M.getOrInsertFunction(
762       "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
763 }
764 
765 static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
766   return M.getOrInsertGlobal(Name, Ty, [&] {
767     return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
768                               nullptr, Name, nullptr,
769                               GlobalVariable::InitialExecTLSModel);
770   });
771 }
772 
773 /// Insert declarations for userspace-specific functions and globals.
774 void MemorySanitizer::createUserspaceApi(Module &M) {
775   IRBuilder<> IRB(*C);
776   // Create the callback.
777   // FIXME: this function should have "Cold" calling conv,
778   // which is not yet implemented.
779   StringRef WarningFnName = Recover ? "__msan_warning"
780                                     : "__msan_warning_noreturn";
781   WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
782 
783   // Create the global TLS variables.
784   RetvalTLS =
785       getOrInsertGlobal(M, "__msan_retval_tls",
786                         ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
787 
788   RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
789 
790   ParamTLS =
791       getOrInsertGlobal(M, "__msan_param_tls",
792                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
793 
794   ParamOriginTLS =
795       getOrInsertGlobal(M, "__msan_param_origin_tls",
796                         ArrayType::get(OriginTy, kParamTLSSize / 4));
797 
798   VAArgTLS =
799       getOrInsertGlobal(M, "__msan_va_arg_tls",
800                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
801 
802   VAArgOriginTLS =
803       getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
804                         ArrayType::get(OriginTy, kParamTLSSize / 4));
805 
806   VAArgOverflowSizeTLS =
807       getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
808   OriginTLS = getOrInsertGlobal(M, "__msan_origin_tls", IRB.getInt32Ty());
809 
810   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
811        AccessSizeIndex++) {
812     unsigned AccessSize = 1 << AccessSizeIndex;
813     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
814     SmallVector<std::pair<unsigned, Attribute>, 2> MaybeWarningFnAttrs;
815     MaybeWarningFnAttrs.push_back(std::make_pair(
816         AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
817     MaybeWarningFnAttrs.push_back(std::make_pair(
818         AttributeList::FirstArgIndex + 1, Attribute::get(*C, Attribute::ZExt)));
819     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
820         FunctionName, AttributeList::get(*C, MaybeWarningFnAttrs),
821         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
822 
823     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
824     SmallVector<std::pair<unsigned, Attribute>, 2> MaybeStoreOriginFnAttrs;
825     MaybeStoreOriginFnAttrs.push_back(std::make_pair(
826         AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
827     MaybeStoreOriginFnAttrs.push_back(std::make_pair(
828         AttributeList::FirstArgIndex + 2, Attribute::get(*C, Attribute::ZExt)));
829     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
830         FunctionName, AttributeList::get(*C, MaybeStoreOriginFnAttrs),
831         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(),
832         IRB.getInt32Ty());
833   }
834 
835   MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
836     "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
837     IRB.getInt8PtrTy(), IntptrTy);
838   MsanPoisonStackFn =
839       M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
840                             IRB.getInt8PtrTy(), IntptrTy);
841 }
842 
/// Insert extern declarations of runtime-provided functions and globals.
844 void MemorySanitizer::initializeCallbacks(Module &M) {
845   // Only do this once.
846   if (CallbacksInitialized)
847     return;
848 
849   IRBuilder<> IRB(*C);
850   // Initialize callbacks that are common for kernel and userspace
851   // instrumentation.
852   MsanChainOriginFn = M.getOrInsertFunction(
853     "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
854   MemmoveFn = M.getOrInsertFunction(
855     "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
856     IRB.getInt8PtrTy(), IntptrTy);
857   MemcpyFn = M.getOrInsertFunction(
858     "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
859     IntptrTy);
860   MemsetFn = M.getOrInsertFunction(
861     "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
862     IntptrTy);
863   // We insert an empty inline asm after __msan_report* to avoid callback merge.
864   EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
865                             StringRef(""), StringRef(""),
866                             /*hasSideEffects=*/true);
867 
868   MsanInstrumentAsmStoreFn =
869       M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
870                             PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
871 
872   if (CompileKernel) {
873     createKernelApi(M);
874   } else {
875     createUserspaceApi(M);
876   }
877   CallbacksInitialized = true;
878 }
879 
880 FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
881                                                              int size) {
882   FunctionCallee *Fns =
883       isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
884   switch (size) {
885   case 1:
886     return Fns[0];
887   case 2:
888     return Fns[1];
889   case 4:
890     return Fns[2];
891   case 8:
892     return Fns[3];
893   default:
894     return nullptr;
895   }
896 }
897 
/// Module-level initialization.
///
/// Selects the memory map parameters for the target triple and sets up
/// per-module state: IntptrTy, OriginTy, branch weights, and the
/// __msan_track_origins / __msan_keep_going flag globals.
901 void MemorySanitizer::initializeModule(Module &M) {
902   auto &DL = M.getDataLayout();
903 
904   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
905   bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
906   // Check the overrides first
907   if (ShadowPassed || OriginPassed) {
908     CustomMapParams.AndMask = ClAndMask;
909     CustomMapParams.XorMask = ClXorMask;
910     CustomMapParams.ShadowBase = ClShadowBase;
911     CustomMapParams.OriginBase = ClOriginBase;
912     MapParams = &CustomMapParams;
913   } else {
914     Triple TargetTriple(M.getTargetTriple());
915     switch (TargetTriple.getOS()) {
916       case Triple::FreeBSD:
917         switch (TargetTriple.getArch()) {
918           case Triple::x86_64:
919             MapParams = FreeBSD_X86_MemoryMapParams.bits64;
920             break;
921           case Triple::x86:
922             MapParams = FreeBSD_X86_MemoryMapParams.bits32;
923             break;
924           default:
925             report_fatal_error("unsupported architecture");
926         }
927         break;
928       case Triple::NetBSD:
929         switch (TargetTriple.getArch()) {
930           case Triple::x86_64:
931             MapParams = NetBSD_X86_MemoryMapParams.bits64;
932             break;
933           default:
934             report_fatal_error("unsupported architecture");
935         }
936         break;
937       case Triple::Linux:
938         switch (TargetTriple.getArch()) {
939           case Triple::x86_64:
940             MapParams = Linux_X86_MemoryMapParams.bits64;
941             break;
942           case Triple::x86:
943             MapParams = Linux_X86_MemoryMapParams.bits32;
944             break;
945           case Triple::mips64:
946           case Triple::mips64el:
947             MapParams = Linux_MIPS_MemoryMapParams.bits64;
948             break;
949           case Triple::ppc64:
950           case Triple::ppc64le:
951             MapParams = Linux_PowerPC_MemoryMapParams.bits64;
952             break;
953           case Triple::systemz:
954             MapParams = Linux_S390_MemoryMapParams.bits64;
955             break;
956           case Triple::aarch64:
957           case Triple::aarch64_be:
958             MapParams = Linux_ARM_MemoryMapParams.bits64;
959             break;
960           default:
961             report_fatal_error("unsupported architecture");
962         }
963         break;
964       default:
965         report_fatal_error("unsupported operating system");
966     }
967   }
968 
969   C = &(M.getContext());
970   IRBuilder<> IRB(*C);
971   IntptrTy = IRB.getIntPtrTy(DL);
972   OriginTy = IRB.getInt32Ty();
973 
974   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
975   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
976 
977   if (!CompileKernel) {
978     if (TrackOrigins)
979       M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
980         return new GlobalVariable(
981             M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
982             IRB.getInt32(TrackOrigins), "__msan_track_origins");
983       });
984 
985     if (Recover)
986       M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
987         return new GlobalVariable(M, IRB.getInt32Ty(), true,
988                                   GlobalValue::WeakODRLinkage,
989                                   IRB.getInt32(Recover), "__msan_keep_going");
990       });
991 }
992 }
993 
994 bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
995   if (!Options.Kernel)
996     insertModuleCtor(M);
997   MSan.emplace(M, Options);
998   return true;
999 }
1000 
1001 namespace {
1002 
1003 /// A helper class that handles instrumentation of VarArg
1004 /// functions on a particular platform.
1005 ///
1006 /// Implementations are expected to insert the instrumentation
1007 /// necessary to propagate argument shadow through VarArg function
1008 /// calls. Visit* methods are called during an InstVisitor pass over
1009 /// the function, and should avoid creating new basic blocks. A new
1010 /// instance of this class is created for each instrumented function.
1011 struct VarArgHelper {
1012   virtual ~VarArgHelper() = default;
1013 
1014   /// Visit a CallBase.
1015   virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
1016 
1017   /// Visit a va_start call.
1018   virtual void visitVAStartInst(VAStartInst &I) = 0;
1019 
1020   /// Visit a va_copy call.
1021   virtual void visitVACopyInst(VACopyInst &I) = 0;
1022 
1023   /// Finalize function instrumentation.
1024   ///
1025   /// This method is called after visiting all interesting (see above)
1026   /// instructions in a function.
1027   virtual void finalizeInstrumentation() = 0;
1028 };
1029 
1030 struct MemorySanitizerVisitor;
1031 
1032 } // end anonymous namespace
1033 
1034 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
1035                                         MemorySanitizerVisitor &Visitor);
1036 
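/// Map a type size in bits to an index into the access-size-indexed callback
/// tables, i.e. log2 of the access size in bytes: 1..8 bits -> 0,
/// 32 bits -> 2, 64 bits -> 3.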
1037 static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
1038   if (TypeSize <= 8) return 0;
1039   return Log2_32_Ceil((TypeSize + 7) / 8);
1040 }
1041 
1042 namespace {
1043 
1044 /// This class does all the work for a given function. Store and Load
1045 /// instructions store and load corresponding shadow and origin
1046 /// values. Most instructions propagate shadow from arguments to their
1047 /// return values. Certain instructions (most importantly, BranchInst)
1048 /// test their argument shadow and print reports (with a runtime call) if it's
1049 /// non-zero.
1050 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
1051   Function &F;
1052   MemorySanitizer &MS;
1053   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
1054   ValueMap<Value*, Value*> ShadowMap, OriginMap;
1055   std::unique_ptr<VarArgHelper> VAHelper;
1056   const TargetLibraryInfo *TLI;
1057   BasicBlock *ActualFnStart;
1058 
1059   // The following flags disable parts of MSan instrumentation based on
1060   // blacklist contents and command-line options.
1061   bool InsertChecks;
1062   bool PropagateShadow;
1063   bool PoisonStack;
1064   bool PoisonUndef;
1065   bool CheckReturnValue;
1066 
1067   struct ShadowOriginAndInsertPoint {
1068     Value *Shadow;
1069     Value *Origin;
1070     Instruction *OrigIns;
1071 
1072     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
1073       : Shadow(S), Origin(O), OrigIns(I) {}
1074   };
1075   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
1076   bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
1077   SmallSet<AllocaInst *, 16> AllocaSet;
1078   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
1079   SmallVector<StoreInst *, 16> StoreList;
1080 
1081   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
1082                          const TargetLibraryInfo &TLI)
1083       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
1084     bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
1085     InsertChecks = SanitizeFunction;
1086     PropagateShadow = SanitizeFunction;
1087     PoisonStack = SanitizeFunction && ClPoisonStack;
1088     PoisonUndef = SanitizeFunction && ClPoisonUndef;
1089     // FIXME: Consider using SpecialCaseList to specify a list of functions that
1090     // must always return fully initialized values. For now, we hardcode "main".
1091     CheckReturnValue = SanitizeFunction && (F.getName() == "main");
1092 
1093     MS.initializeCallbacks(*F.getParent());
1094     if (MS.CompileKernel)
1095       ActualFnStart = insertKmsanPrologue(F);
1096     else
1097       ActualFnStart = &F.getEntryBlock();
1098 
1099     LLVM_DEBUG(if (!InsertChecks) dbgs()
1100                << "MemorySanitizer is not inserting checks into '"
1101                << F.getName() << "'\n");
1102   }
1103 
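  /// With -msan-track-origins >= 2, chain the origin through
  /// __msan_chain_origin() so the runtime can record intermediate stores of an
  /// uninitialized value; at level 1 the origin is passed through unchanged.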
1104   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
1105     if (MS.TrackOrigins <= 1) return V;
1106     return IRB.CreateCall(MS.MsanChainOriginFn, V);
1107   }
1108 
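  /// Widen a 4-byte origin to pointer width by replicating it into both
  /// halves, so that paintOrigin() below can fill memory with pointer-sized
  /// stores.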
1109   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
1110     const DataLayout &DL = F.getParent()->getDataLayout();
1111     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1112     if (IntptrSize == kOriginSize) return Origin;
1113     assert(IntptrSize == kOriginSize * 2);
1114     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
1115     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
1116   }
1117 
1118   /// Fill memory range with the given origin value.
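  ///
  /// If the destination is sufficiently aligned, the bulk of the range is
  /// filled with pointer-sized stores of the widened origin (see
  /// originToIntptr) and only the remainder uses 4-byte stores.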
1119   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
1120                    unsigned Size, Align Alignment) {
1121     const DataLayout &DL = F.getParent()->getDataLayout();
1122     const Align IntptrAlignment = Align(DL.getABITypeAlignment(MS.IntptrTy));
1123     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1124     assert(IntptrAlignment >= kMinOriginAlignment);
1125     assert(IntptrSize >= kOriginSize);
1126 
1127     unsigned Ofs = 0;
1128     Align CurrentAlignment = Alignment;
1129     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1130       Value *IntptrOrigin = originToIntptr(IRB, Origin);
1131       Value *IntptrOriginPtr =
1132           IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
1133       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
1134         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
1135                        : IntptrOriginPtr;
1136         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
1137         Ofs += IntptrSize / kOriginSize;
1138         CurrentAlignment = IntptrAlignment;
1139       }
1140     }
1141 
1142     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1143       Value *GEP =
1144           i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
1145       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
1146       CurrentAlignment = kMinOriginAlignment;
1147     }
1148   }
1149 
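  /// Store the origin for a shadow store. A shadow known to be zero stores no
  /// origin; otherwise, depending on AsCall, either call
  /// __msan_maybe_store_origin_* or emit an inline "shadow != 0" check that
  /// guards a paintOrigin() call.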
1150   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
1151                    Value *OriginPtr, Align Alignment, bool AsCall) {
1152     const DataLayout &DL = F.getParent()->getDataLayout();
1153     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1154     unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
1155     if (Shadow->getType()->isAggregateType()) {
1156       paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1157                   OriginAlignment);
1158     } else {
1159       Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
1160       if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1161         if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
1162           paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1163                       OriginAlignment);
1164         return;
1165       }
1166 
1167       unsigned TypeSizeInBits =
1168           DL.getTypeSizeInBits(ConvertedShadow->getType());
1169       unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1170       if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1171         FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1172         Value *ConvertedShadow2 = IRB.CreateZExt(
1173             ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1174         IRB.CreateCall(Fn, {ConvertedShadow2,
1175                             IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
1176                             Origin});
1177       } else {
1178         Value *Cmp = IRB.CreateICmpNE(
1179             ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
1180         Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1181             Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
1182         IRBuilder<> IRBNew(CheckTerm);
1183         paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1184                     OriginAlignment);
1185       }
1186     }
1187   }
1188 
1189   void materializeStores(bool InstrumentWithCalls) {
1190     for (StoreInst *SI : StoreList) {
1191       IRBuilder<> IRB(SI);
1192       Value *Val = SI->getValueOperand();
1193       Value *Addr = SI->getPointerOperand();
1194       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1195       Value *ShadowPtr, *OriginPtr;
1196       Type *ShadowTy = Shadow->getType();
1197       const Align Alignment = assumeAligned(SI->getAlignment());
1198       const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1199       std::tie(ShadowPtr, OriginPtr) =
1200           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
1201 
1202       StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
1203       LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
1204       (void)NewSI;
1205 
1206       if (SI->isAtomic())
1207         SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
1208 
1209       if (MS.TrackOrigins && !SI->isAtomic())
1210         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1211                     OriginAlignment, InstrumentWithCalls);
1212     }
1213   }
1214 
1215   /// Helper function to insert a warning at IRB's current insert point.
1216   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
1217     if (!Origin)
1218       Origin = (Value *)IRB.getInt32(0);
1219     if (MS.CompileKernel) {
1220       IRB.CreateCall(MS.WarningFn, Origin);
1221     } else {
1222       if (MS.TrackOrigins) {
1223         IRB.CreateStore(Origin, MS.OriginTLS);
1224       }
1225       IRB.CreateCall(MS.WarningFn, {});
1226     }
1227     IRB.CreateCall(MS.EmptyAsm, {});
1228     // FIXME: Insert UnreachableInst if !MS.Recover?
1229     // This may invalidate some of the following checks and needs to be done
1230     // at the very end.
1231   }
1232 
1233   void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
1234                            bool AsCall) {
1235     IRBuilder<> IRB(OrigIns);
1236     LLVM_DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
1237     Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
1238     LLVM_DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
1239 
1240     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1241       if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
1242         insertWarningFn(IRB, Origin);
1243       }
1244       return;
1245     }
1246 
1247     const DataLayout &DL = OrigIns->getModule()->getDataLayout();
1248 
1249     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1250     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1251     if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1252       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1253       Value *ConvertedShadow2 =
1254           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1255       IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
1256                                                 ? Origin
1257                                                 : (Value *)IRB.getInt32(0)});
1258     } else {
1259       Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
1260                                     getCleanShadow(ConvertedShadow), "_mscmp");
1261       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1262           Cmp, OrigIns,
1263           /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
1264 
1265       IRB.SetInsertPoint(CheckTerm);
1266       insertWarningFn(IRB, Origin);
1267       LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
1268     }
1269   }
1270 
1271   void materializeChecks(bool InstrumentWithCalls) {
1272     for (const auto &ShadowData : InstrumentationList) {
1273       Instruction *OrigIns = ShadowData.OrigIns;
1274       Value *Shadow = ShadowData.Shadow;
1275       Value *Origin = ShadowData.Origin;
1276       materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
1277     }
1278     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
1279   }
1280 
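  /// KMSAN prologue: split the entry block and insert a call to
  /// __msan_get_context_state(), caching pointers to the per-task shadow and
  /// origin arrays in the corresponding MS.*TLS members. Returns the block
  /// containing the original function body.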
1281   BasicBlock *insertKmsanPrologue(Function &F) {
1282     BasicBlock *ret =
1283         SplitBlock(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHI());
1284     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
1285     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
1286     Constant *Zero = IRB.getInt32(0);
1287     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1288                                 {Zero, IRB.getInt32(0)}, "param_shadow");
1289     MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1290                                  {Zero, IRB.getInt32(1)}, "retval_shadow");
1291     MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1292                                 {Zero, IRB.getInt32(2)}, "va_arg_shadow");
1293     MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1294                                       {Zero, IRB.getInt32(3)}, "va_arg_origin");
1295     MS.VAArgOverflowSizeTLS =
1296         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1297                       {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
1298     MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1299                                       {Zero, IRB.getInt32(5)}, "param_origin");
1300     MS.RetvalOriginTLS =
1301         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1302                       {Zero, IRB.getInt32(6)}, "retval_origin");
1303     return ret;
1304   }
1305 
1306   /// Add MemorySanitizer instrumentation to a function.
1307   bool runOnFunction() {
1308     // In the presence of unreachable blocks, we may see Phi nodes with
1309     // incoming nodes from such blocks. Since InstVisitor skips unreachable
1310     // blocks, such nodes will not have any shadow value associated with them.
1311     // It's easier to remove unreachable blocks than deal with missing shadow.
1312     removeUnreachableBlocks(F);
1313 
1314     // Iterate all BBs in depth-first order and create shadow instructions
1315     // for all instructions (where applicable).
1316     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
1317     for (BasicBlock *BB : depth_first(ActualFnStart))
1318       visit(*BB);
1319 
1320     // Finalize PHI nodes.
1321     for (PHINode *PN : ShadowPHINodes) {
1322       PHINode *PNS = cast<PHINode>(getShadow(PN));
1323       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1324       size_t NumValues = PN->getNumIncomingValues();
1325       for (size_t v = 0; v < NumValues; v++) {
1326         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1327         if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1328       }
1329     }
1330 
1331     VAHelper->finalizeInstrumentation();
1332 
    // Poison allocas at their llvm.lifetime.start intrinsics, unless we have
    // fallen back to instrumenting only the allocas themselves.
1335     if (InstrumentLifetimeStart) {
1336       for (auto Item : LifetimeStartList) {
1337         instrumentAlloca(*Item.second, Item.first);
1338         AllocaSet.erase(Item.second);
1339       }
1340     }
1341     // Poison the allocas for which we didn't instrument the corresponding
1342     // lifetime intrinsics.
1343     for (AllocaInst *AI : AllocaSet)
1344       instrumentAlloca(*AI);
1345 
1346     bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
1347                                InstrumentationList.size() + StoreList.size() >
1348                                    (unsigned)ClInstrumentationWithCallThreshold;
1349 
1350     // Insert shadow value checks.
1351     materializeChecks(InstrumentWithCalls);
1352 
1353     // Delayed instrumentation of StoreInst.
1354     // This may not add new address checks.
1355     materializeStores(InstrumentWithCalls);
1356 
1357     return true;
1358   }
1359 
1360   /// Compute the shadow type that corresponds to a given Value.
1361   Type *getShadowTy(Value *V) {
1362     return getShadowTy(V->getType());
1363   }
1364 
1365   /// Compute the shadow type that corresponds to a given Type.
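  /// For example, a <4 x float> value gets a <4 x i32> shadow, and a
  /// {i32, double} struct gets a {i32, i64} shadow (elements are mapped
  /// individually; non-integer scalars become integers of the same size).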
1366   Type *getShadowTy(Type *OrigTy) {
1367     if (!OrigTy->isSized()) {
1368       return nullptr;
1369     }
1370     // For integer type, shadow is the same as the original type.
1371     // This may return weird-sized types like i1.
1372     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1373       return IT;
1374     const DataLayout &DL = F.getParent()->getDataLayout();
1375     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1376       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1377       return FixedVectorType::get(IntegerType::get(*MS.C, EltSize),
1378                                   VT->getNumElements());
1379     }
1380     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1381       return ArrayType::get(getShadowTy(AT->getElementType()),
1382                             AT->getNumElements());
1383     }
1384     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1385       SmallVector<Type*, 4> Elements;
1386       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1387         Elements.push_back(getShadowTy(ST->getElementType(i)));
1388       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1389       LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1390       return Res;
1391     }
1392     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1393     return IntegerType::get(*MS.C, TypeSize);
1394   }
1395 
1396   /// Flatten a vector type.
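  /// For example, a <4 x i32> shadow is flattened to i128.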
1397   Type *getShadowTyNoVec(Type *ty) {
1398     if (VectorType *vt = dyn_cast<VectorType>(ty))
1399       return IntegerType::get(*MS.C,
1400                               vt->getPrimitiveSizeInBits().getFixedSize());
1401     return ty;
1402   }
1403 
  /// Convert a shadow value to its flattened variant.
1405   Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
1406     Type *Ty = V->getType();
1407     Type *NoVecTy = getShadowTyNoVec(Ty);
1408     if (Ty == NoVecTy) return V;
1409     return IRB.CreateBitCast(V, NoVecTy);
1410   }
1411 
1412   /// Compute the integer shadow offset that corresponds to a given
1413   /// application address.
1414   ///
1415   /// Offset = (Addr & ~AndMask) ^ XorMask
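  ///
  /// For instance, on targets where AndMask is zero (see MS.MapParams) the
  /// offset is simply Addr ^ XorMask; the exact mask values are
  /// target-specific.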
1416   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1417     Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
1418 
1419     uint64_t AndMask = MS.MapParams->AndMask;
1420     if (AndMask)
1421       OffsetLong =
1422           IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
1423 
1424     uint64_t XorMask = MS.MapParams->XorMask;
1425     if (XorMask)
1426       OffsetLong =
1427           IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
1428     return OffsetLong;
1429   }
1430 
1431   /// Compute the shadow and origin addresses corresponding to a given
1432   /// application address.
1433   ///
1434   /// Shadow = ShadowBase + Offset
1435   /// Origin = (OriginBase + Offset) & ~3ULL
1436   std::pair<Value *, Value *>
1437   getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1438                               MaybeAlign Alignment) {
1439     Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1440     Value *ShadowLong = ShadowOffset;
1441     uint64_t ShadowBase = MS.MapParams->ShadowBase;
1442     if (ShadowBase != 0) {
1443       ShadowLong =
1444         IRB.CreateAdd(ShadowLong,
1445                       ConstantInt::get(MS.IntptrTy, ShadowBase));
1446     }
1447     Value *ShadowPtr =
1448         IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
1449     Value *OriginPtr = nullptr;
1450     if (MS.TrackOrigins) {
1451       Value *OriginLong = ShadowOffset;
1452       uint64_t OriginBase = MS.MapParams->OriginBase;
1453       if (OriginBase != 0)
1454         OriginLong = IRB.CreateAdd(OriginLong,
1455                                    ConstantInt::get(MS.IntptrTy, OriginBase));
1456       if (!Alignment || *Alignment < kMinOriginAlignment) {
1457         uint64_t Mask = kMinOriginAlignment.value() - 1;
1458         OriginLong =
1459             IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
1460       }
1461       OriginPtr =
1462           IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
1463     }
1464     return std::make_pair(ShadowPtr, OriginPtr);
1465   }
1466 
1467   std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1468                                                        IRBuilder<> &IRB,
1469                                                        Type *ShadowTy,
1470                                                        bool isStore) {
1471     Value *ShadowOriginPtrs;
1472     const DataLayout &DL = F.getParent()->getDataLayout();
1473     int Size = DL.getTypeStoreSize(ShadowTy);
1474 
1475     FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1476     Value *AddrCast =
1477         IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
1478     if (Getter) {
1479       ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
1480     } else {
1481       Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1482       ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
1483                                                 : MS.MsanMetadataPtrForLoadN,
1484                                         {AddrCast, SizeVal});
1485     }
1486     Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1487     ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
1488     Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1489 
1490     return std::make_pair(ShadowPtr, OriginPtr);
1491   }
1492 
1493   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1494                                                  Type *ShadowTy,
1495                                                  MaybeAlign Alignment,
1496                                                  bool isStore) {
1497     if (MS.CompileKernel)
1498       return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1499     return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1500   }
1501 
1502   /// Compute the shadow address for a given function argument.
1503   ///
1504   /// Shadow = ParamTLS+ArgOffset.
1505   Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
1506                                  int ArgOffset) {
1507     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1508     if (ArgOffset)
1509       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1510     return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1511                               "_msarg");
1512   }
1513 
1514   /// Compute the origin address for a given function argument.
1515   Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1516                                  int ArgOffset) {
1517     if (!MS.TrackOrigins)
1518       return nullptr;
1519     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1520     if (ArgOffset)
1521       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1522     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1523                               "_msarg_o");
1524   }
1525 
1526   /// Compute the shadow address for a retval.
1527   Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1528     return IRB.CreatePointerCast(MS.RetvalTLS,
1529                                  PointerType::get(getShadowTy(A), 0),
1530                                  "_msret");
1531   }
1532 
1533   /// Compute the origin address for a retval.
1534   Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1535     // We keep a single origin for the entire retval. Might be too optimistic.
1536     return MS.RetvalOriginTLS;
1537   }
1538 
1539   /// Set SV to be the shadow value for V.
1540   void setShadow(Value *V, Value *SV) {
1541     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1542     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1543   }
1544 
1545   /// Set Origin to be the origin value for V.
1546   void setOrigin(Value *V, Value *Origin) {
1547     if (!MS.TrackOrigins) return;
1548     assert(!OriginMap.count(V) && "Values may only have one origin");
1549     LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1550     OriginMap[V] = Origin;
1551   }
1552 
1553   Constant *getCleanShadow(Type *OrigTy) {
1554     Type *ShadowTy = getShadowTy(OrigTy);
1555     if (!ShadowTy)
1556       return nullptr;
1557     return Constant::getNullValue(ShadowTy);
1558   }
1559 
1560   /// Create a clean shadow value for a given value.
1561   ///
1562   /// Clean shadow (all zeroes) means all bits of the value are defined
1563   /// (initialized).
1564   Constant *getCleanShadow(Value *V) {
1565     return getCleanShadow(V->getType());
1566   }
1567 
1568   /// Create a dirty shadow of a given shadow type.
1569   Constant *getPoisonedShadow(Type *ShadowTy) {
1570     assert(ShadowTy);
1571     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1572       return Constant::getAllOnesValue(ShadowTy);
1573     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1574       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1575                                       getPoisonedShadow(AT->getElementType()));
1576       return ConstantArray::get(AT, Vals);
1577     }
1578     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1579       SmallVector<Constant *, 4> Vals;
1580       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1581         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1582       return ConstantStruct::get(ST, Vals);
1583     }
1584     llvm_unreachable("Unexpected shadow type");
1585   }
1586 
1587   /// Create a dirty shadow for a given value.
1588   Constant *getPoisonedShadow(Value *V) {
1589     Type *ShadowTy = getShadowTy(V);
1590     if (!ShadowTy)
1591       return nullptr;
1592     return getPoisonedShadow(ShadowTy);
1593   }
1594 
1595   /// Create a clean (zero) origin.
1596   Value *getCleanOrigin() {
1597     return Constant::getNullValue(MS.OriginTy);
1598   }
1599 
1600   /// Get the shadow value for a given Value.
1601   ///
1602   /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
1604   Value *getShadow(Value *V) {
1605     if (!PropagateShadow) return getCleanShadow(V);
1606     if (Instruction *I = dyn_cast<Instruction>(V)) {
1607       if (I->getMetadata("nosanitize"))
1608         return getCleanShadow(V);
1609       // For instructions the shadow is already stored in the map.
1610       Value *Shadow = ShadowMap[V];
1611       if (!Shadow) {
1612         LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1613         (void)I;
1614         assert(Shadow && "No shadow for a value");
1615       }
1616       return Shadow;
1617     }
1618     if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1619       Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1620       LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1621       (void)U;
1622       return AllOnes;
1623     }
1624     if (Argument *A = dyn_cast<Argument>(V)) {
1625       // For arguments we compute the shadow on demand and store it in the map.
1626       Value **ShadowPtr = &ShadowMap[V];
1627       if (*ShadowPtr)
1628         return *ShadowPtr;
1629       Function *F = A->getParent();
1630       IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI());
1631       unsigned ArgOffset = 0;
1632       const DataLayout &DL = F->getParent()->getDataLayout();
1633       for (auto &FArg : F->args()) {
1634         if (!FArg.getType()->isSized()) {
1635           LLVM_DEBUG(dbgs() << "Arg is not sized\n");
1636           continue;
1637         }
1638         unsigned Size =
1639             FArg.hasByValAttr()
1640                 ? DL.getTypeAllocSize(FArg.getParamByValType())
1641                 : DL.getTypeAllocSize(FArg.getType());
1642         if (A == &FArg) {
1643           bool Overflow = ArgOffset + Size > kParamTLSSize;
1644           Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1645           if (FArg.hasByValAttr()) {
1646             // ByVal pointer itself has clean shadow. We copy the actual
1647             // argument shadow to the underlying memory.
1648             // Figure out maximal valid memcpy alignment.
1649             const Align ArgAlign = DL.getValueOrABITypeAlignment(
1650                 MaybeAlign(FArg.getParamAlignment()), FArg.getParamByValType());
1651             Value *CpShadowPtr =
1652                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1653                                    /*isStore*/ true)
1654                     .first;
1655             // TODO(glider): need to copy origins.
1656             if (Overflow) {
1657               // ParamTLS overflow.
1658               EntryIRB.CreateMemSet(
1659                   CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
1660                   Size, ArgAlign);
1661             } else {
1662               const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1663               Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1664                                                  CopyAlign, Size);
1665               LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
1666               (void)Cpy;
1667             }
1668             *ShadowPtr = getCleanShadow(V);
1669           } else {
1670             if (Overflow) {
1671               // ParamTLS overflow.
1672               *ShadowPtr = getCleanShadow(V);
1673             } else {
1674               *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
1675                                                       kShadowTLSAlignment);
1676             }
1677           }
1678           LLVM_DEBUG(dbgs()
1679                      << "  ARG:    " << FArg << " ==> " << **ShadowPtr << "\n");
1680           if (MS.TrackOrigins && !Overflow) {
1681             Value *OriginPtr =
1682                 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1683             setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
1684           } else {
1685             setOrigin(A, getCleanOrigin());
1686           }
1687         }
1688         ArgOffset += alignTo(Size, kShadowTLSAlignment);
1689       }
1690       assert(*ShadowPtr && "Could not find shadow for an argument");
1691       return *ShadowPtr;
1692     }
1693     // For everything else the shadow is zero.
1694     return getCleanShadow(V);
1695   }
1696 
1697   /// Get the shadow for i-th argument of the instruction I.
1698   Value *getShadow(Instruction *I, int i) {
1699     return getShadow(I->getOperand(i));
1700   }
1701 
1702   /// Get the origin for a value.
1703   Value *getOrigin(Value *V) {
1704     if (!MS.TrackOrigins) return nullptr;
1705     if (!PropagateShadow) return getCleanOrigin();
1706     if (isa<Constant>(V)) return getCleanOrigin();
1707     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1708            "Unexpected value type in getOrigin()");
1709     if (Instruction *I = dyn_cast<Instruction>(V)) {
1710       if (I->getMetadata("nosanitize"))
1711         return getCleanOrigin();
1712     }
1713     Value *Origin = OriginMap[V];
1714     assert(Origin && "Missing origin");
1715     return Origin;
1716   }
1717 
1718   /// Get the origin for i-th argument of the instruction I.
1719   Value *getOrigin(Instruction *I, int i) {
1720     return getOrigin(I->getOperand(i));
1721   }
1722 
1723   /// Remember the place where a shadow check should be inserted.
1724   ///
1725   /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
1727   void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1728     assert(Shadow);
1729     if (!InsertChecks) return;
1730 #ifndef NDEBUG
1731     Type *ShadowTy = Shadow->getType();
1732     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1733            "Can only insert checks for integer and vector shadow types");
1734 #endif
1735     InstrumentationList.push_back(
1736         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1737   }
1738 
1739   /// Remember the place where a shadow check should be inserted.
1740   ///
1741   /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
1743   void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1744     assert(Val);
1745     Value *Shadow, *Origin;
1746     if (ClCheckConstantShadow) {
1747       Shadow = getShadow(Val);
1748       if (!Shadow) return;
1749       Origin = getOrigin(Val);
1750     } else {
1751       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1752       if (!Shadow) return;
1753       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1754     }
1755     insertShadowCheck(Shadow, Origin, OrigIns);
1756   }
1757 
1758   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1759     switch (a) {
1760       case AtomicOrdering::NotAtomic:
1761         return AtomicOrdering::NotAtomic;
1762       case AtomicOrdering::Unordered:
1763       case AtomicOrdering::Monotonic:
1764       case AtomicOrdering::Release:
1765         return AtomicOrdering::Release;
1766       case AtomicOrdering::Acquire:
1767       case AtomicOrdering::AcquireRelease:
1768         return AtomicOrdering::AcquireRelease;
1769       case AtomicOrdering::SequentiallyConsistent:
1770         return AtomicOrdering::SequentiallyConsistent;
1771     }
1772     llvm_unreachable("Unknown ordering");
1773   }
1774 
1775   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1776     switch (a) {
1777       case AtomicOrdering::NotAtomic:
1778         return AtomicOrdering::NotAtomic;
1779       case AtomicOrdering::Unordered:
1780       case AtomicOrdering::Monotonic:
1781       case AtomicOrdering::Acquire:
1782         return AtomicOrdering::Acquire;
1783       case AtomicOrdering::Release:
1784       case AtomicOrdering::AcquireRelease:
1785         return AtomicOrdering::AcquireRelease;
1786       case AtomicOrdering::SequentiallyConsistent:
1787         return AtomicOrdering::SequentiallyConsistent;
1788     }
1789     llvm_unreachable("Unknown ordering");
1790   }
1791 
1792   // ------------------- Visitors.
1793   using InstVisitor<MemorySanitizerVisitor>::visit;
1794   void visit(Instruction &I) {
1795     if (!I.getMetadata("nosanitize"))
1796       InstVisitor<MemorySanitizerVisitor>::visit(I);
1797   }
1798 
1799   /// Instrument LoadInst
1800   ///
1801   /// Loads the corresponding shadow and (optionally) origin.
1802   /// Optionally, checks that the load address is fully defined.
1803   void visitLoadInst(LoadInst &I) {
1804     assert(I.getType()->isSized() && "Load type must have size");
1805     assert(!I.getMetadata("nosanitize"));
1806     IRBuilder<> IRB(I.getNextNode());
1807     Type *ShadowTy = getShadowTy(&I);
1808     Value *Addr = I.getPointerOperand();
1809     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
1810     const Align Alignment = assumeAligned(I.getAlignment());
1811     if (PropagateShadow) {
1812       std::tie(ShadowPtr, OriginPtr) =
1813           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
1814       setShadow(&I,
1815                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
1816     } else {
1817       setShadow(&I, getCleanShadow(&I));
1818     }
1819 
1820     if (ClCheckAccessAddress)
1821       insertShadowCheck(I.getPointerOperand(), &I);
1822 
1823     if (I.isAtomic())
1824       I.setOrdering(addAcquireOrdering(I.getOrdering()));
1825 
1826     if (MS.TrackOrigins) {
1827       if (PropagateShadow) {
1828         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1829         setOrigin(
1830             &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
1831       } else {
1832         setOrigin(&I, getCleanOrigin());
1833       }
1834     }
1835   }
1836 
1837   /// Instrument StoreInst
1838   ///
1839   /// Stores the corresponding shadow and (optionally) origin.
1840   /// Optionally, checks that the store address is fully defined.
1841   void visitStoreInst(StoreInst &I) {
1842     StoreList.push_back(&I);
1843     if (ClCheckAccessAddress)
1844       insertShadowCheck(I.getPointerOperand(), &I);
1845   }
1846 
1847   void handleCASOrRMW(Instruction &I) {
1848     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1849 
1850     IRBuilder<> IRB(&I);
1851     Value *Addr = I.getOperand(0);
1852     Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.getType(), Align(1),
1853                                           /*isStore*/ true)
1854                            .first;
1855 
1856     if (ClCheckAccessAddress)
1857       insertShadowCheck(Addr, &I);
1858 
1859     // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
1861     // detect this situation reliably without possible false positives.
1862     if (isa<AtomicCmpXchgInst>(I))
1863       insertShadowCheck(I.getOperand(1), &I);
1864 
1865     IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1866 
1867     setShadow(&I, getCleanShadow(&I));
1868     setOrigin(&I, getCleanOrigin());
1869   }
1870 
1871   void visitAtomicRMWInst(AtomicRMWInst &I) {
1872     handleCASOrRMW(I);
1873     I.setOrdering(addReleaseOrdering(I.getOrdering()));
1874   }
1875 
1876   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1877     handleCASOrRMW(I);
1878     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1879   }
1880 
1881   // Vector manipulation.
1882   void visitExtractElementInst(ExtractElementInst &I) {
1883     insertShadowCheck(I.getOperand(1), &I);
1884     IRBuilder<> IRB(&I);
1885     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1886               "_msprop"));
1887     setOrigin(&I, getOrigin(&I, 0));
1888   }
1889 
1890   void visitInsertElementInst(InsertElementInst &I) {
1891     insertShadowCheck(I.getOperand(2), &I);
1892     IRBuilder<> IRB(&I);
1893     setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1894               I.getOperand(2), "_msprop"));
1895     setOriginForNaryOp(I);
1896   }
1897 
1898   void visitShuffleVectorInst(ShuffleVectorInst &I) {
1899     IRBuilder<> IRB(&I);
1900     setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1901                                           I.getShuffleMask(), "_msprop"));
1902     setOriginForNaryOp(I);
1903   }
1904 
1905   // Casts.
1906   void visitSExtInst(SExtInst &I) {
1907     IRBuilder<> IRB(&I);
1908     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1909     setOrigin(&I, getOrigin(&I, 0));
1910   }
1911 
1912   void visitZExtInst(ZExtInst &I) {
1913     IRBuilder<> IRB(&I);
1914     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1915     setOrigin(&I, getOrigin(&I, 0));
1916   }
1917 
1918   void visitTruncInst(TruncInst &I) {
1919     IRBuilder<> IRB(&I);
1920     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1921     setOrigin(&I, getOrigin(&I, 0));
1922   }
1923 
1924   void visitBitCastInst(BitCastInst &I) {
1925     // Special case: if this is the bitcast (there is exactly 1 allowed) between
1926     // a musttail call and a ret, don't instrument. New instructions are not
1927     // allowed after a musttail call.
1928     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1929       if (CI->isMustTailCall())
1930         return;
1931     IRBuilder<> IRB(&I);
1932     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1933     setOrigin(&I, getOrigin(&I, 0));
1934   }
1935 
1936   void visitPtrToIntInst(PtrToIntInst &I) {
1937     IRBuilder<> IRB(&I);
1938     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1939              "_msprop_ptrtoint"));
1940     setOrigin(&I, getOrigin(&I, 0));
1941   }
1942 
1943   void visitIntToPtrInst(IntToPtrInst &I) {
1944     IRBuilder<> IRB(&I);
1945     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1946              "_msprop_inttoptr"));
1947     setOrigin(&I, getOrigin(&I, 0));
1948   }
1949 
1950   void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1951   void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1952   void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1953   void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1954   void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1955   void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1956 
1957   /// Propagate shadow for bitwise AND.
1958   ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
1962   void visitAnd(BinaryOperator &I) {
1963     IRBuilder<> IRB(&I);
1964     //  "And" of 0 and a poisoned value results in unpoisoned value.
1965     //  1&1 => 1;     0&1 => 0;     p&1 => p;
1966     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
1967     //  1&p => p;     0&p => 0;     p&p => p;
1968     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
1969     Value *S1 = getShadow(&I, 0);
1970     Value *S2 = getShadow(&I, 1);
1971     Value *V1 = I.getOperand(0);
1972     Value *V2 = I.getOperand(1);
1973     if (V1->getType() != S1->getType()) {
1974       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1975       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1976     }
1977     Value *S1S2 = IRB.CreateAnd(S1, S2);
1978     Value *V1S2 = IRB.CreateAnd(V1, S2);
1979     Value *S1V2 = IRB.CreateAnd(S1, V2);
1980     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
1981     setOriginForNaryOp(I);
1982   }
1983 
1984   void visitOr(BinaryOperator &I) {
1985     IRBuilder<> IRB(&I);
1986     //  "Or" of 1 and a poisoned value results in unpoisoned value.
1987     //  1|1 => 1;     0|1 => 1;     p|1 => 1;
1988     //  1|0 => 1;     0|0 => 0;     p|0 => p;
1989     //  1|p => 1;     0|p => p;     p|p => p;
1990     //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
1991     Value *S1 = getShadow(&I, 0);
1992     Value *S2 = getShadow(&I, 1);
1993     Value *V1 = IRB.CreateNot(I.getOperand(0));
1994     Value *V2 = IRB.CreateNot(I.getOperand(1));
1995     if (V1->getType() != S1->getType()) {
1996       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1997       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1998     }
1999     Value *S1S2 = IRB.CreateAnd(S1, S2);
2000     Value *V1S2 = IRB.CreateAnd(V1, S2);
2001     Value *S1V2 = IRB.CreateAnd(S1, V2);
2002     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2003     setOriginForNaryOp(I);
2004   }
2005 
2006   /// Default propagation of shadow and/or origin.
2007   ///
2008   /// This class implements the general case of shadow propagation, used in all
2009   /// cases where we don't know and/or don't care about what the operation
2010   /// actually does. It converts all input shadow values to a common type
2011   /// (extending or truncating as necessary), and bitwise OR's them.
2012   ///
2013   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
2014   /// fully initialized), and less prone to false positives.
2015   ///
2016   /// This class also implements the general case of origin propagation. For a
2017   /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
2019   /// rightmost of them is picked. It does not matter which one is picked if all
2020   /// arguments are initialized.
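  ///
  /// For example, for %c = add <4 x i32> %a, %b the combined shadow is
  /// Sa | Sb, and the combined origin is Ob if Sb is non-zero and Oa
  /// otherwise.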
2021   template <bool CombineShadow>
2022   class Combiner {
2023     Value *Shadow = nullptr;
2024     Value *Origin = nullptr;
2025     IRBuilder<> &IRB;
2026     MemorySanitizerVisitor *MSV;
2027 
2028   public:
2029     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2030         : IRB(IRB), MSV(MSV) {}
2031 
2032     /// Add a pair of shadow and origin values to the mix.
2033     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2034       if (CombineShadow) {
2035         assert(OpShadow);
2036         if (!Shadow)
2037           Shadow = OpShadow;
2038         else {
2039           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2040           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2041         }
2042       }
2043 
2044       if (MSV->MS.TrackOrigins) {
2045         assert(OpOrigin);
2046         if (!Origin) {
2047           Origin = OpOrigin;
2048         } else {
2049           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in a 0 origin value.
2051           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2052             Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
2053             Value *Cond =
2054                 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
2055             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2056           }
2057         }
2058       }
2059       return *this;
2060     }
2061 
2062     /// Add an application value to the mix.
2063     Combiner &Add(Value *V) {
2064       Value *OpShadow = MSV->getShadow(V);
2065       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2066       return Add(OpShadow, OpOrigin);
2067     }
2068 
2069     /// Set the current combined values as the given instruction's shadow
2070     /// and origin.
2071     void Done(Instruction *I) {
2072       if (CombineShadow) {
2073         assert(Shadow);
2074         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2075         MSV->setShadow(I, Shadow);
2076       }
2077       if (MSV->MS.TrackOrigins) {
2078         assert(Origin);
2079         MSV->setOrigin(I, Origin);
2080       }
2081     }
2082   };
2083 
2084   using ShadowAndOriginCombiner = Combiner<true>;
2085   using OriginCombiner = Combiner<false>;
2086 
2087   /// Propagate origin for arbitrary operation.
2088   void setOriginForNaryOp(Instruction &I) {
2089     if (!MS.TrackOrigins) return;
2090     IRBuilder<> IRB(&I);
2091     OriginCombiner OC(this, IRB);
2092     for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
2093       OC.Add(OI->get());
2094     OC.Done(&I);
2095   }
2096 
2097   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2098     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2099            "Vector of pointers is not a valid shadow type");
2100     return Ty->isVectorTy() ? cast<VectorType>(Ty)->getNumElements() *
2101                                   Ty->getScalarSizeInBits()
2102                             : Ty->getPrimitiveSizeInBits();
2103   }
2104 
2105   /// Cast between two shadow types, extending or truncating as
2106   /// necessary.
2107   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2108                           bool Signed = false) {
2109     Type *srcTy = V->getType();
2110     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2111     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2112     if (srcSizeInBits > 1 && dstSizeInBits == 1)
2113       return IRB.CreateICmpNE(V, getCleanShadow(V));
2114 
2115     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2116       return IRB.CreateIntCast(V, dstTy, Signed);
2117     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2118         cast<VectorType>(dstTy)->getNumElements() ==
2119             cast<VectorType>(srcTy)->getNumElements())
2120       return IRB.CreateIntCast(V, dstTy, Signed);
2121     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2122     Value *V2 =
2123       IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2124     return IRB.CreateBitCast(V2, dstTy);
2125     // TODO: handle struct types.
2126   }
2127 
2128   /// Cast an application value to the type of its own shadow.
2129   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2130     Type *ShadowTy = getShadowTy(V);
2131     if (V->getType() == ShadowTy)
2132       return V;
2133     if (V->getType()->isPtrOrPtrVectorTy())
2134       return IRB.CreatePtrToInt(V, ShadowTy);
2135     else
2136       return IRB.CreateBitCast(V, ShadowTy);
2137   }
2138 
2139   /// Propagate shadow for arbitrary operation.
2140   void handleShadowOr(Instruction &I) {
2141     IRBuilder<> IRB(&I);
2142     ShadowAndOriginCombiner SC(this, IRB);
2143     for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
2144       SC.Add(OI->get());
2145     SC.Done(&I);
2146   }
2147 
2148   void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2149 
2150   // Handle multiplication by constant.
2151   //
2152   // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes the corresponding number of lower
  // bits of the result zero as well. We model it by shifting the other operand
2155   // shadow left by the required number of bits. Effectively, we transform
2156   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2157   // We use multiplication by 2**N instead of shift to cover the case of
2158   // multiplication by 0, which may occur in some elements of a vector operand.
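  // For example, for X * 12 (12 == 3 * 2**2) the two lowest bits of the result
  // are always zero, so the shadow of the result is Sx * 4, i.e. Sx shifted
  // left by 2.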
2159   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2160                            Value *OtherArg) {
2161     Constant *ShadowMul;
2162     Type *Ty = ConstArg->getType();
2163     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2164       unsigned NumElements = VTy->getNumElements();
2165       Type *EltTy = VTy->getElementType();
2166       SmallVector<Constant *, 16> Elements;
2167       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2168         if (ConstantInt *Elt =
2169                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2170           const APInt &V = Elt->getValue();
2171           APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2172           Elements.push_back(ConstantInt::get(EltTy, V2));
2173         } else {
2174           Elements.push_back(ConstantInt::get(EltTy, 1));
2175         }
2176       }
2177       ShadowMul = ConstantVector::get(Elements);
2178     } else {
2179       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2180         const APInt &V = Elt->getValue();
2181         APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2182         ShadowMul = ConstantInt::get(Ty, V2);
2183       } else {
2184         ShadowMul = ConstantInt::get(Ty, 1);
2185       }
2186     }
2187 
2188     IRBuilder<> IRB(&I);
2189     setShadow(&I,
2190               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2191     setOrigin(&I, getOrigin(OtherArg));
2192   }
2193 
2194   void visitMul(BinaryOperator &I) {
2195     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2196     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2197     if (constOp0 && !constOp1)
2198       handleMulByConstant(I, constOp0, I.getOperand(1));
2199     else if (constOp1 && !constOp0)
2200       handleMulByConstant(I, constOp1, I.getOperand(0));
2201     else
2202       handleShadowOr(I);
2203   }
2204 
2205   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2206   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2207   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2208   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2209   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2210   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2211 
2212   void handleIntegerDiv(Instruction &I) {
2213     IRBuilder<> IRB(&I);
2214     // Strict on the second argument.
2215     insertShadowCheck(I.getOperand(1), &I);
2216     setShadow(&I, getShadow(&I, 0));
2217     setOrigin(&I, getOrigin(&I, 0));
2218   }
2219 
2220   void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2221   void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2222   void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2223   void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2224 
  // Floating point division is side-effect free, so we cannot require that the
  // divisor is fully initialized; instead we propagate shadow. See PR37523.
2227   void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2228   void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2229 
2230   /// Instrument == and != comparisons.
2231   ///
2232   /// Sometimes the comparison result is known even if some of the bits of the
2233   /// arguments are not.
2234   void handleEqualityComparison(ICmpInst &I) {
2235     IRBuilder<> IRB(&I);
2236     Value *A = I.getOperand(0);
2237     Value *B = I.getOperand(1);
2238     Value *Sa = getShadow(A);
2239     Value *Sb = getShadow(B);
2240 
2241     // Get rid of pointers and vectors of pointers.
2242     // For ints (and vectors of ints), types of A and Sa match,
2243     // and this is a no-op.
2244     A = IRB.CreatePointerCast(A, Sa->getType());
2245     B = IRB.CreatePointerCast(B, Sb->getType());
2246 
2247     // A == B  <==>  (C = A^B) == 0
2248     // A != B  <==>  (C = A^B) != 0
2249     // Sc = Sa | Sb
2250     Value *C = IRB.CreateXor(A, B);
2251     Value *Sc = IRB.CreateOr(Sa, Sb);
2252     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
2253     // Result is defined if one of the following is true
2254     // * there is a defined 1 bit in C
2255     // * C is fully defined
2256     // Si = !(C & ~Sc) && Sc
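    // For example, if A = 0b10?? and B = 0b01?? ('?' denoting undefined bits),
    // then C = A^B has a defined 1 bit, so A != B holds regardless of the
    // undefined bits and the result shadow is clean.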
2257     Value *Zero = Constant::getNullValue(Sc->getType());
2258     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2259     Value *Si =
2260       IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
2261                     IRB.CreateICmpEQ(
2262                       IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
2263     Si->setName("_msprop_icmp");
2264     setShadow(&I, Si);
2265     setOriginForNaryOp(I);
2266   }
2267 
2268   /// Build the lowest possible value of V, taking into account V's
2269   ///        uninitialized bits.
2270   Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2271                                 bool isSigned) {
2272     if (isSigned) {
2273       // Split shadow into sign bit and other bits.
2274       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2275       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined sign bit, minimize other undefined bits.
2277       return
2278         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
2279     } else {
2280       // Minimize undefined bits.
2281       return IRB.CreateAnd(A, IRB.CreateNot(Sa));
2282     }
2283   }
2284 
2285   /// Build the highest possible value of V, taking into account V's
2286   ///        uninitialized bits.
2287   Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2288                                 bool isSigned) {
2289     if (isSigned) {
2290       // Split shadow into sign bit and other bits.
2291       Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2292       Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined sign bit, maximize other undefined bits.
2294       return
2295         IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
2296     } else {
2297       // Maximize undefined bits.
2298       return IRB.CreateOr(A, Sa);
2299     }
2300   }
2301 
2302   /// Instrument relational comparisons.
2303   ///
2304   /// This function does exact shadow propagation for all relational
2305   /// comparisons of integers, pointers and vectors of those.
2306   /// FIXME: output seems suboptimal when one of the operands is a constant
2307   void handleRelationalComparisonExact(ICmpInst &I) {
2308     IRBuilder<> IRB(&I);
2309     Value *A = I.getOperand(0);
2310     Value *B = I.getOperand(1);
2311     Value *Sa = getShadow(A);
2312     Value *Sb = getShadow(B);
2313 
2314     // Get rid of pointers and vectors of pointers.
2315     // For ints (and vectors of ints), types of A and Sa match,
2316     // and this is a no-op.
2317     A = IRB.CreatePointerCast(A, Sa->getType());
2318     B = IRB.CreatePointerCast(B, Sb->getType());
2319 
2320     // Let [a0, a1] be the interval of possible values of A, taking into account
2321     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2322     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
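    // For example, for an unsigned A = 0b1?? (i.e. in [4, 7]) and a fully
    // defined B = 3, "A > B" compares [4, 7] against [3, 3]: 4 > 3 and 7 > 3
    // agree, so the result of the comparison is fully defined.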
2323     bool IsSigned = I.isSigned();
2324     Value *S1 = IRB.CreateICmp(I.getPredicate(),
2325                                getLowestPossibleValue(IRB, A, Sa, IsSigned),
2326                                getHighestPossibleValue(IRB, B, Sb, IsSigned));
2327     Value *S2 = IRB.CreateICmp(I.getPredicate(),
2328                                getHighestPossibleValue(IRB, A, Sa, IsSigned),
2329                                getLowestPossibleValue(IRB, B, Sb, IsSigned));
2330     Value *Si = IRB.CreateXor(S1, S2);
2331     setShadow(&I, Si);
2332     setOriginForNaryOp(I);
2333   }
2334 
2335   /// Instrument signed relational comparisons.
2336   ///
2337   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2338   /// bit of the shadow. Everything else is delegated to handleShadowOr().
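  ///
  /// For example, for "x < 0" the result shadow is an i1 equal to the sign bit
  /// of the shadow of x, computed as (Sx s< 0).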
2339   void handleSignedRelationalComparison(ICmpInst &I) {
2340     Constant *constOp;
2341     Value *op = nullptr;
2342     CmpInst::Predicate pre;
2343     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
2344       op = I.getOperand(0);
2345       pre = I.getPredicate();
2346     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
2347       op = I.getOperand(1);
2348       pre = I.getSwappedPredicate();
2349     } else {
2350       handleShadowOr(I);
2351       return;
2352     }
2353 
2354     if ((constOp->isNullValue() &&
2355          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
2356         (constOp->isAllOnesValue() &&
2357          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
2358       IRBuilder<> IRB(&I);
2359       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
2360                                         "_msprop_icmp_s");
2361       setShadow(&I, Shadow);
2362       setOrigin(&I, getOrigin(op));
2363     } else {
2364       handleShadowOr(I);
2365     }
2366   }
2367 
2368   void visitICmpInst(ICmpInst &I) {
2369     if (!ClHandleICmp) {
2370       handleShadowOr(I);
2371       return;
2372     }
2373     if (I.isEquality()) {
2374       handleEqualityComparison(I);
2375       return;
2376     }
2377 
2378     assert(I.isRelational());
2379     if (ClHandleICmpExact) {
2380       handleRelationalComparisonExact(I);
2381       return;
2382     }
2383     if (I.isSigned()) {
2384       handleSignedRelationalComparison(I);
2385       return;
2386     }
2387 
2388     assert(I.isUnsigned());
2389     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2390       handleRelationalComparisonExact(I);
2391       return;
2392     }
2393 
2394     handleShadowOr(I);
2395   }
2396 
2397   void visitFCmpInst(FCmpInst &I) {
2398     handleShadowOr(I);
2399   }
2400 
2401   void handleShift(BinaryOperator &I) {
2402     IRBuilder<> IRB(&I);
2403     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2404     // Otherwise perform the same shift on S1.
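    // For example, for %r = shl i32 %x, %y the result shadow is
    // (Sx << %y) | (Sy == 0 ? 0 : -1).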
2405     Value *S1 = getShadow(&I, 0);
2406     Value *S2 = getShadow(&I, 1);
2407     Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
2408                                    S2->getType());
2409     Value *V2 = I.getOperand(1);
2410     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2411     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2412     setOriginForNaryOp(I);
2413   }
2414 
2415   void visitShl(BinaryOperator &I) { handleShift(I); }
2416   void visitAShr(BinaryOperator &I) { handleShift(I); }
2417   void visitLShr(BinaryOperator &I) { handleShift(I); }
2418 
2419   /// Instrument llvm.memmove
2420   ///
2421   /// At this point we don't know if llvm.memmove will be inlined or not.
2422   /// If we don't instrument it and it gets inlined,
2423   /// our interceptor will not kick in and we will lose the memmove.
2424   /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in the case
2426   /// of overlapping regions. So, we simply lower the intrinsic to a call.
2427   ///
2428   /// Similar situation exists for memcpy and memset.
2429   void visitMemMoveInst(MemMoveInst &I) {
2430     IRBuilder<> IRB(&I);
2431     IRB.CreateCall(
2432         MS.MemmoveFn,
2433         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2434          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2435          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2436     I.eraseFromParent();
2437   }
2438 
2439   // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
2441   // FIXME: consider doing manual inline for small constant sizes and proper
2442   // alignment.
2443   void visitMemCpyInst(MemCpyInst &I) {
2444     IRBuilder<> IRB(&I);
2445     IRB.CreateCall(
2446         MS.MemcpyFn,
2447         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2448          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2449          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2450     I.eraseFromParent();
2451   }
2452 
2453   // Same as memcpy.
2454   void visitMemSetInst(MemSetInst &I) {
2455     IRBuilder<> IRB(&I);
2456     IRB.CreateCall(
2457         MS.MemsetFn,
2458         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2459          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2460          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2461     I.eraseFromParent();
2462   }
2463 
2464   void visitVAStartInst(VAStartInst &I) {
2465     VAHelper->visitVAStartInst(I);
2466   }
2467 
2468   void visitVACopyInst(VACopyInst &I) {
2469     VAHelper->visitVACopyInst(I);
2470   }
2471 
2472   /// Handle vector store-like intrinsics.
2473   ///
2474   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2475   /// has 1 pointer argument and 1 vector argument, returns void.
2476   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2477     IRBuilder<> IRB(&I);
2478     Value* Addr = I.getArgOperand(0);
2479     Value *Shadow = getShadow(&I, 1);
2480     Value *ShadowPtr, *OriginPtr;
2481 
2482     // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
2484     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2485         Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
2486     IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
2487 
2488     if (ClCheckAccessAddress)
2489       insertShadowCheck(Addr, &I);
2490 
2491     // FIXME: factor out common code from materializeStores
2492     if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2493     return true;
2494   }
2495 
2496   /// Handle vector load-like intrinsics.
2497   ///
2498   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2499   /// has 1 pointer argument, returns a vector.
2500   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2501     IRBuilder<> IRB(&I);
2502     Value *Addr = I.getArgOperand(0);
2503 
2504     Type *ShadowTy = getShadowTy(&I);
2505     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2506     if (PropagateShadow) {
2507       // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
2509       const Align Alignment = Align(1);
2510       std::tie(ShadowPtr, OriginPtr) =
2511           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2512       setShadow(&I,
2513                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2514     } else {
2515       setShadow(&I, getCleanShadow(&I));
2516     }
2517 
2518     if (ClCheckAccessAddress)
2519       insertShadowCheck(Addr, &I);
2520 
2521     if (MS.TrackOrigins) {
2522       if (PropagateShadow)
2523         setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
2524       else
2525         setOrigin(&I, getCleanOrigin());
2526     }
2527     return true;
2528   }
2529 
2530   /// Handle (SIMD arithmetic)-like intrinsics.
2531   ///
2532   /// Instrument intrinsics with any number of arguments of the same type,
2533   /// equal to the return type. The type should be simple (no aggregates or
2534   /// pointers; vectors are fine).
2535   /// Caller guarantees that this intrinsic does not access memory.
2536   bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2537     Type *RetTy = I.getType();
2538     if (!(RetTy->isIntOrIntVectorTy() ||
2539           RetTy->isFPOrFPVectorTy() ||
2540           RetTy->isX86_MMXTy()))
2541       return false;
2542 
2543     unsigned NumArgOperands = I.getNumArgOperands();
2544 
2545     for (unsigned i = 0; i < NumArgOperands; ++i) {
2546       Type *Ty = I.getArgOperand(i)->getType();
2547       if (Ty != RetTy)
2548         return false;
2549     }
2550 
2551     IRBuilder<> IRB(&I);
2552     ShadowAndOriginCombiner SC(this, IRB);
2553     for (unsigned i = 0; i < NumArgOperands; ++i)
2554       SC.Add(I.getArgOperand(i));
2555     SC.Done(&I);
2556 
2557     return true;
2558   }
2559 
2560   /// Heuristically instrument unknown intrinsics.
2561   ///
2562   /// The main purpose of this code is to do something reasonable with all
2563   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2564   /// We recognize several classes of intrinsics by their argument types and
2565   /// ModRefBehaviour and apply special instrumentation when we are reasonably
2566   /// sure that we know what the intrinsic does.
2567   ///
2568   /// We special-case intrinsics where this approach fails. See llvm.bswap
2569   /// handling as an example of that.
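  ///
  /// For instance, an intrinsic that does not access memory, has no dedicated
  /// handler, and takes two <8 x i16> operands while returning <8 x i16> is
  /// treated as simple SIMD arithmetic by maybeHandleSimpleNomemIntrinsic and
  /// gets OR-propagated shadow.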
2570   bool handleUnknownIntrinsic(IntrinsicInst &I) {
2571     unsigned NumArgOperands = I.getNumArgOperands();
2572     if (NumArgOperands == 0)
2573       return false;
2574 
2575     if (NumArgOperands == 2 &&
2576         I.getArgOperand(0)->getType()->isPointerTy() &&
2577         I.getArgOperand(1)->getType()->isVectorTy() &&
2578         I.getType()->isVoidTy() &&
2579         !I.onlyReadsMemory()) {
2580       // This looks like a vector store.
2581       return handleVectorStoreIntrinsic(I);
2582     }
2583 
2584     if (NumArgOperands == 1 &&
2585         I.getArgOperand(0)->getType()->isPointerTy() &&
2586         I.getType()->isVectorTy() &&
2587         I.onlyReadsMemory()) {
2588       // This looks like a vector load.
2589       return handleVectorLoadIntrinsic(I);
2590     }
2591 
2592     if (I.doesNotAccessMemory())
2593       if (maybeHandleSimpleNomemIntrinsic(I))
2594         return true;
2595 
2596     // FIXME: detect and handle SSE maskstore/maskload
2597     return false;
2598   }
2599 
2600   void handleInvariantGroup(IntrinsicInst &I) {
2601     setShadow(&I, getShadow(&I, 0));
2602     setOrigin(&I, getOrigin(&I, 0));
2603   }
2604 
2605   void handleLifetimeStart(IntrinsicInst &I) {
2606     if (!PoisonStack)
2607       return;
2608     DenseMap<Value *, AllocaInst *> AllocaForValue;
2609     AllocaInst *AI =
2610         llvm::findAllocaForValue(I.getArgOperand(1), AllocaForValue);
2611     if (!AI)
2612       InstrumentLifetimeStart = false;
2613     LifetimeStartList.push_back(std::make_pair(&I, AI));
2614   }
2615 
2616   void handleBswap(IntrinsicInst &I) {
2617     IRBuilder<> IRB(&I);
2618     Value *Op = I.getArgOperand(0);
2619     Type *OpType = Op->getType();
2620     Function *BswapFunc = Intrinsic::getDeclaration(
2621       F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2622     setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2623     setOrigin(&I, getOrigin(Op));
2624   }
2625 
2626   // Instrument vector convert intrinsic.
2627   //
2628   // This function instruments intrinsics like cvtsi2ss:
2629   // %Out = int_xxx_cvtyyy(%ConvertOp)
2630   // or
2631   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // The intrinsic converts \p NumUsedElements elements of \p ConvertOp to the
  // same number of \p Out elements, and (if it has 2 arguments) copies the
  // rest of the elements from \p CopyOp.
  // In most cases the conversion involves a floating-point value, which may
  // trigger a hardware exception when not fully initialized. For this reason
  // we require \p ConvertOp[0:NumUsedElements] to be fully initialized and
  // trap otherwise.
2638   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2639   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2640   // return a fully initialized value.
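  // For example, for a cvtsi2ss-style intrinsic with CopyOp = <4 x float>,
  // ConvertOp = i32 and NumUsedElements = 1, we require the i32 operand to be
  // fully initialized and set the result shadow to the shadow of CopyOp with
  // element 0 zeroed out.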
2641   void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2642     IRBuilder<> IRB(&I);
2643     Value *CopyOp, *ConvertOp;
2644 
2645     switch (I.getNumArgOperands()) {
2646     case 3:
2647       assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2648       LLVM_FALLTHROUGH;
2649     case 2:
2650       CopyOp = I.getArgOperand(0);
2651       ConvertOp = I.getArgOperand(1);
2652       break;
2653     case 1:
2654       ConvertOp = I.getArgOperand(0);
2655       CopyOp = nullptr;
2656       break;
2657     default:
2658       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2659     }
2660 
2661     // The first *NumUsedElements* elements of ConvertOp are converted to the
2662     // same number of output elements. The rest of the output is copied from
2663     // CopyOp, or (if not available) filled with zeroes.
2664     // Combine shadow for elements of ConvertOp that are used in this operation,
2665     // and insert a check.
2666     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2667     // int->any conversion.
2668     Value *ConvertShadow = getShadow(ConvertOp);
2669     Value *AggShadow = nullptr;
2670     if (ConvertOp->getType()->isVectorTy()) {
2671       AggShadow = IRB.CreateExtractElement(
2672           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2673       for (int i = 1; i < NumUsedElements; ++i) {
2674         Value *MoreShadow = IRB.CreateExtractElement(
2675             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2676         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2677       }
2678     } else {
2679       AggShadow = ConvertShadow;
2680     }
2681     assert(AggShadow->getType()->isIntegerTy());
2682     insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2683 
2684     // Build result shadow by zero-filling parts of CopyOp shadow that come from
2685     // ConvertOp.
2686     if (CopyOp) {
2687       assert(CopyOp->getType() == I.getType());
2688       assert(CopyOp->getType()->isVectorTy());
2689       Value *ResultShadow = getShadow(CopyOp);
2690       Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
2691       for (int i = 0; i < NumUsedElements; ++i) {
2692         ResultShadow = IRB.CreateInsertElement(
2693             ResultShadow, ConstantInt::getNullValue(EltTy),
2694             ConstantInt::get(IRB.getInt32Ty(), i));
2695       }
2696       setShadow(&I, ResultShadow);
2697       setOrigin(&I, getOrigin(CopyOp));
2698     } else {
2699       setShadow(&I, getCleanShadow(&I));
2700       setOrigin(&I, getCleanOrigin());
2701     }
2702   }
2703 
  // Given a scalar or vector, extract the lower 64 bits (or fewer), and return
  // all zeroes if they are zero, and all ones otherwise.
2706   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2707     if (S->getType()->isVectorTy())
2708       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2709     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2710     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2711     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2712   }
2713 
2714   // Given a vector, extract its first element, and return all
2715   // zeroes if it is zero, and all ones otherwise.
2716   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2717     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2718     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2719     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2720   }
2721 
2722   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2723     Type *T = S->getType();
2724     assert(T->isVectorTy());
2725     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2726     return IRB.CreateSExt(S2, T);
2727   }
2728 
2729   // Instrument vector shift intrinsic.
2730   //
2731   // This function instruments intrinsics like int_x86_avx2_psll_w.
2732   // Intrinsic shifts %In by %ShiftSize bits.
2733   // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2734   // size, and the rest is ignored. Behavior is defined even if shift size is
2735   // greater than register (or field) width.
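  //
  // A rough sketch of the shadow computation for %Out = psll.w(%In, %Count)
  // (illustrative, not the exact IR emitted by this pass):
  //   %ShiftedS = psll.w(%SIn, %Count)        ; shift the shadow by the same amount
  //   %SOut = %ShiftedS | sext(%SCount != 0)  ; poison everything if the shift
  //                                           ; amount itself is poisoned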
2736   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2737     assert(I.getNumArgOperands() == 2);
2738     IRBuilder<> IRB(&I);
2739     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2740     // Otherwise perform the same shift on S1.
2741     Value *S1 = getShadow(&I, 0);
2742     Value *S2 = getShadow(&I, 1);
2743     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2744                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2745     Value *V1 = I.getOperand(0);
2746     Value *V2 = I.getOperand(1);
2747     Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
2748                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
2749     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2750     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2751     setOriginForNaryOp(I);
2752   }
2753 
2754   // Get an X86_MMX-sized vector type.
2755   Type *getMMXVectorTy(unsigned EltSizeInBits) {
2756     const unsigned X86_MMXSizeInBits = 64;
2757     assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
2758            "Illegal MMX vector element size");
2759     return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2760                                 X86_MMXSizeInBits / EltSizeInBits);
2761   }
2762 
2763   // Returns a signed counterpart for an (un)signed-saturate-and-pack
2764   // intrinsic.
2765   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2766     switch (id) {
2767       case Intrinsic::x86_sse2_packsswb_128:
2768       case Intrinsic::x86_sse2_packuswb_128:
2769         return Intrinsic::x86_sse2_packsswb_128;
2770 
2771       case Intrinsic::x86_sse2_packssdw_128:
2772       case Intrinsic::x86_sse41_packusdw:
2773         return Intrinsic::x86_sse2_packssdw_128;
2774 
2775       case Intrinsic::x86_avx2_packsswb:
2776       case Intrinsic::x86_avx2_packuswb:
2777         return Intrinsic::x86_avx2_packsswb;
2778 
2779       case Intrinsic::x86_avx2_packssdw:
2780       case Intrinsic::x86_avx2_packusdw:
2781         return Intrinsic::x86_avx2_packssdw;
2782 
2783       case Intrinsic::x86_mmx_packsswb:
2784       case Intrinsic::x86_mmx_packuswb:
2785         return Intrinsic::x86_mmx_packsswb;
2786 
2787       case Intrinsic::x86_mmx_packssdw:
2788         return Intrinsic::x86_mmx_packssdw;
2789       default:
2790         llvm_unreachable("unexpected intrinsic id");
2791     }
2792   }
2793 
2794   // Instrument vector pack intrinsic.
2795   //
  // This function instruments intrinsics like x86_mmx_packsswb, which pack
  // elements of 2 input vectors into half as many bits with saturation.
2798   // Shadow is propagated with the signed variant of the same intrinsic applied
2799   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2800   // EltSizeInBits is used only for x86mmx arguments.
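  //
  // A rough sketch for %Out = packsswb(%A, %B) (illustrative only):
  //   %SA1  = sext(%SA != zeroinitializer)    ; all-ones per poisoned element
  //   %SB1  = sext(%SB != zeroinitializer)
  //   %SOut = packsswb(%SA1, %SB1)            ; saturating pack preserves the
  //                                           ; all-ones / all-zeros pattern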
2801   void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2802     assert(I.getNumArgOperands() == 2);
2803     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2804     IRBuilder<> IRB(&I);
2805     Value *S1 = getShadow(&I, 0);
2806     Value *S2 = getShadow(&I, 1);
2807     assert(isX86_MMX || S1->getType()->isVectorTy());
2808 
2809     // SExt and ICmpNE below must apply to individual elements of input vectors.
2810     // In case of x86mmx arguments, cast them to appropriate vector types and
2811     // back.
2812     Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2813     if (isX86_MMX) {
2814       S1 = IRB.CreateBitCast(S1, T);
2815       S2 = IRB.CreateBitCast(S2, T);
2816     }
2817     Value *S1_ext = IRB.CreateSExt(
2818         IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
2819     Value *S2_ext = IRB.CreateSExt(
2820         IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
2821     if (isX86_MMX) {
2822       Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2823       S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2824       S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2825     }
2826 
2827     Function *ShadowFn = Intrinsic::getDeclaration(
2828         F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2829 
2830     Value *S =
2831         IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2832     if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2833     setShadow(&I, S);
2834     setOriginForNaryOp(I);
2835   }
2836 
2837   // Instrument sum-of-absolute-differences intrinsic.
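  // The exact result cannot be modelled bit-precisely, so the shadow is
  // approximated conservatively: if any input byte feeding a result element is
  // poisoned, the 16 significant bits of that element become poisoned, while
  // the high bits that are always zero stay clean. Roughly (illustrative only):
  //   %S    = %SA | %SB
  //   %S1   = sext(%S != 0)                   ; per-element all-ones if poisoned
  //   %SOut = lshr %S1, <zero bits per element>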
2838   void handleVectorSadIntrinsic(IntrinsicInst &I) {
2839     const unsigned SignificantBitsPerResultElement = 16;
2840     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2841     Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2842     unsigned ZeroBitsPerResultElement =
2843         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2844 
2845     IRBuilder<> IRB(&I);
2846     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2847     S = IRB.CreateBitCast(S, ResTy);
2848     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2849                        ResTy);
2850     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2851     S = IRB.CreateBitCast(S, getShadowTy(&I));
2852     setShadow(&I, S);
2853     setOriginForNaryOp(I);
2854   }
2855 
2856   // Instrument multiply-add intrinsic.
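  // pmadd multiplies adjacent pairs of elements and adds the products, so each
  // result element depends on two elements of each input vector. The shadow is
  // approximated conservatively: a result element is fully poisoned if any bit
  // of the input shadow that maps onto it is set. Roughly (illustrative only):
  //   %S    = %SA | %SB
  //   %SOut = sext(bitcast(%S) != 0)          ; per double-width result element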
2857   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2858                                   unsigned EltSizeInBits = 0) {
2859     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2860     Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2861     IRBuilder<> IRB(&I);
2862     Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2863     S = IRB.CreateBitCast(S, ResTy);
2864     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2865                        ResTy);
2866     S = IRB.CreateBitCast(S, getShadowTy(&I));
2867     setShadow(&I, S);
2868     setOriginForNaryOp(I);
2869   }
2870 
2871   // Instrument compare-packed intrinsic.
2872   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
2873   // all-ones shadow.
2874   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2875     IRBuilder<> IRB(&I);
2876     Type *ResTy = getShadowTy(&I);
2877     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2878     Value *S = IRB.CreateSExt(
2879         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2880     setShadow(&I, S);
2881     setOriginForNaryOp(I);
2882   }
2883 
2884   // Instrument compare-scalar intrinsic.
2885   // This handles both cmp* intrinsics which return the result in the first
2886   // element of a vector, and comi* which return the result as i32.
2887   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2888     IRBuilder<> IRB(&I);
2889     Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2890     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2891     setShadow(&I, S);
2892     setOriginForNaryOp(I);
2893   }
2894 
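  // stmxcsr stores the 32-bit MXCSR register to memory. The stored value is
  // always fully defined, so the corresponding 4 bytes of shadow are simply
  // cleared; the address itself is optionally checked.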
2895   void handleStmxcsr(IntrinsicInst &I) {
2896     IRBuilder<> IRB(&I);
2897     Value* Addr = I.getArgOperand(0);
2898     Type *Ty = IRB.getInt32Ty();
2899     Value *ShadowPtr =
2900         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
2901 
2902     IRB.CreateStore(getCleanShadow(Ty),
2903                     IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
2904 
2905     if (ClCheckAccessAddress)
2906       insertShadowCheck(Addr, &I);
2907   }
2908 
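  // ldmxcsr loads the MXCSR register from memory. Loading partially
  // initialized bits into a control register is a bug, so the shadow of the
  // 4 loaded bytes is checked (and the address optionally checked as well).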
2909   void handleLdmxcsr(IntrinsicInst &I) {
2910     if (!InsertChecks) return;
2911 
2912     IRBuilder<> IRB(&I);
2913     Value *Addr = I.getArgOperand(0);
2914     Type *Ty = IRB.getInt32Ty();
2915     const Align Alignment = Align(1);
2916     Value *ShadowPtr, *OriginPtr;
2917     std::tie(ShadowPtr, OriginPtr) =
2918         getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
2919 
2920     if (ClCheckAccessAddress)
2921       insertShadowCheck(Addr, &I);
2922 
2923     Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
2924     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
2925                                     : getCleanOrigin();
2926     insertShadowCheck(Shadow, Origin, &I);
2927   }
2928 
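  // Masked stores are mirrored on the shadow: the value's shadow is stored to
  // shadow memory under the same mask, so only the enabled lanes are updated.
  // Roughly (illustrative only):
  //   call void @llvm.masked.store.*(%VShadow, %ShadowPtr, align, %Mask)
  // The address and mask shadows are optionally checked as well.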
2929   void handleMaskedStore(IntrinsicInst &I) {
2930     IRBuilder<> IRB(&I);
2931     Value *V = I.getArgOperand(0);
2932     Value *Addr = I.getArgOperand(1);
2933     const Align Alignment(
2934         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
2935     Value *Mask = I.getArgOperand(3);
2936     Value *Shadow = getShadow(V);
2937 
2938     Value *ShadowPtr;
2939     Value *OriginPtr;
2940     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2941         Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
2942 
2943     if (ClCheckAccessAddress) {
2944       insertShadowCheck(Addr, &I);
      // An uninitialized mask is kind of like an uninitialized address, but
      // not as scary.
2947       insertShadowCheck(Mask, &I);
2948     }
2949 
2950     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
2951 
2952     if (MS.TrackOrigins) {
2953       auto &DL = F.getParent()->getDataLayout();
2954       paintOrigin(IRB, getOrigin(V), OriginPtr,
2955                   DL.getTypeStoreSize(Shadow->getType()),
2956                   std::max(Alignment, kMinOriginAlignment));
2957     }
2958   }
2959 
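  // Masked loads likewise read the shadow through a masked load from shadow
  // memory, with the shadow of the pass-through operand filling the disabled
  // lanes. Roughly (illustrative only):
  //   %SOut = call @llvm.masked.load.*(%ShadowPtr, align, %Mask, %SPassThru)
  // When tracking origins, the result origin is chosen between the
  // pass-through origin and the one loaded from origin memory.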
2960   bool handleMaskedLoad(IntrinsicInst &I) {
2961     IRBuilder<> IRB(&I);
2962     Value *Addr = I.getArgOperand(0);
2963     const Align Alignment(
2964         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
2965     Value *Mask = I.getArgOperand(2);
2966     Value *PassThru = I.getArgOperand(3);
2967 
2968     Type *ShadowTy = getShadowTy(&I);
2969     Value *ShadowPtr, *OriginPtr;
2970     if (PropagateShadow) {
2971       std::tie(ShadowPtr, OriginPtr) =
2972           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2973       setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Alignment, Mask,
2974                                          getShadow(PassThru), "_msmaskedld"));
2975     } else {
2976       setShadow(&I, getCleanShadow(&I));
2977     }
2978 
2979     if (ClCheckAccessAddress) {
2980       insertShadowCheck(Addr, &I);
2981       insertShadowCheck(Mask, &I);
2982     }
2983 
2984     if (MS.TrackOrigins) {
2985       if (PropagateShadow) {
2986         // Choose between PassThru's and the loaded value's origins.
2987         Value *MaskedPassThruShadow = IRB.CreateAnd(
2988             getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
2989 
2990         Value *Acc = IRB.CreateExtractElement(
2991             MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2992         for (int i = 1,
2993                  N = cast<VectorType>(PassThru->getType())->getNumElements();
2994              i < N; ++i) {
2995           Value *More = IRB.CreateExtractElement(
2996               MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2997           Acc = IRB.CreateOr(Acc, More);
2998         }
2999 
3000         Value *Origin = IRB.CreateSelect(
3001             IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
3002             getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
3003 
3004         setOrigin(&I, Origin);
3005       } else {
3006         setOrigin(&I, getCleanOrigin());
3007       }
3008     }
3009     return true;
3010   }
3011 
3012   // Instrument BMI / BMI2 intrinsics.
3013   // All of these intrinsics are Z = I(X, Y)
  // where the types of all operands and the result match, and are either i32
  // or i64.
3015   // The following instrumentation happens to work for all of them:
3016   //   Sz = I(Sx, Y) | (sext (Sy != 0))
3017   void handleBmiIntrinsic(IntrinsicInst &I) {
3018     IRBuilder<> IRB(&I);
3019     Type *ShadowTy = getShadowTy(&I);
3020 
3021     // If any bit of the mask operand is poisoned, then the whole thing is.
3022     Value *SMask = getShadow(&I, 1);
3023     SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
3024                            ShadowTy);
3025     // Apply the same intrinsic to the shadow of the first operand.
3026     Value *S = IRB.CreateCall(I.getCalledFunction(),
3027                               {getShadow(&I, 0), I.getOperand(1)});
3028     S = IRB.CreateOr(SMask, S);
3029     setShadow(&I, S);
3030     setOriginForNaryOp(I);
3031   }
3032 
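  // Build a shufflevector mask that duplicates the even (or odd) member of
  // each pair of elements, e.g. for Width == 4:
  //   even: <0, 0, 2, 2>,  odd: <1, 1, 3, 3>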
3033   SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
3034     SmallVector<int, 8> Mask;
3035     for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
3036       Mask.append(2, X);
3037     }
3038     return Mask;
3039   }
3040 
3041   // Instrument pclmul intrinsics.
3042   // These intrinsics operate either on odd or on even elements of the input
3043   // vectors, depending on the constant in the 3rd argument, ignoring the rest.
3044   // Replace the unused elements with copies of the used ones, ex:
3045   //   (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
3046   // or
3047   //   (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
3048   // and then apply the usual shadow combining logic.
3049   void handlePclmulIntrinsic(IntrinsicInst &I) {
3050     IRBuilder<> IRB(&I);
3051     Type *ShadowTy = getShadowTy(&I);
3052     unsigned Width =
3053         cast<VectorType>(I.getArgOperand(0)->getType())->getNumElements();
3054     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
3055            "pclmul 3rd operand must be a constant");
3056     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3057     Value *Shuf0 =
3058         IRB.CreateShuffleVector(getShadow(&I, 0), UndefValue::get(ShadowTy),
3059                                 getPclmulMask(Width, Imm & 0x01));
3060     Value *Shuf1 =
3061         IRB.CreateShuffleVector(getShadow(&I, 1), UndefValue::get(ShadowTy),
3062                                 getPclmulMask(Width, Imm & 0x10));
3063     ShadowAndOriginCombiner SOC(this, IRB);
3064     SOC.Add(Shuf0, getOrigin(&I, 0));
3065     SOC.Add(Shuf1, getOrigin(&I, 1));
3066     SOC.Done(&I);
3067   }
3068 
3069   void visitIntrinsicInst(IntrinsicInst &I) {
3070     switch (I.getIntrinsicID()) {
3071     case Intrinsic::lifetime_start:
3072       handleLifetimeStart(I);
3073       break;
3074     case Intrinsic::launder_invariant_group:
3075     case Intrinsic::strip_invariant_group:
3076       handleInvariantGroup(I);
3077       break;
3078     case Intrinsic::bswap:
3079       handleBswap(I);
3080       break;
3081     case Intrinsic::masked_store:
3082       handleMaskedStore(I);
3083       break;
3084     case Intrinsic::masked_load:
3085       handleMaskedLoad(I);
3086       break;
3087     case Intrinsic::x86_sse_stmxcsr:
3088       handleStmxcsr(I);
3089       break;
3090     case Intrinsic::x86_sse_ldmxcsr:
3091       handleLdmxcsr(I);
3092       break;
3093     case Intrinsic::x86_avx512_vcvtsd2usi64:
3094     case Intrinsic::x86_avx512_vcvtsd2usi32:
3095     case Intrinsic::x86_avx512_vcvtss2usi64:
3096     case Intrinsic::x86_avx512_vcvtss2usi32:
3097     case Intrinsic::x86_avx512_cvttss2usi64:
3098     case Intrinsic::x86_avx512_cvttss2usi:
3099     case Intrinsic::x86_avx512_cvttsd2usi64:
3100     case Intrinsic::x86_avx512_cvttsd2usi:
3101     case Intrinsic::x86_avx512_cvtusi2ss:
3102     case Intrinsic::x86_avx512_cvtusi642sd:
3103     case Intrinsic::x86_avx512_cvtusi642ss:
3104     case Intrinsic::x86_sse2_cvtsd2si64:
3105     case Intrinsic::x86_sse2_cvtsd2si:
3106     case Intrinsic::x86_sse2_cvtsd2ss:
3107     case Intrinsic::x86_sse2_cvttsd2si64:
3108     case Intrinsic::x86_sse2_cvttsd2si:
3109     case Intrinsic::x86_sse_cvtss2si64:
3110     case Intrinsic::x86_sse_cvtss2si:
3111     case Intrinsic::x86_sse_cvttss2si64:
3112     case Intrinsic::x86_sse_cvttss2si:
3113       handleVectorConvertIntrinsic(I, 1);
3114       break;
3115     case Intrinsic::x86_sse_cvtps2pi:
3116     case Intrinsic::x86_sse_cvttps2pi:
3117       handleVectorConvertIntrinsic(I, 2);
3118       break;
3119 
3120     case Intrinsic::x86_avx512_psll_w_512:
3121     case Intrinsic::x86_avx512_psll_d_512:
3122     case Intrinsic::x86_avx512_psll_q_512:
3123     case Intrinsic::x86_avx512_pslli_w_512:
3124     case Intrinsic::x86_avx512_pslli_d_512:
3125     case Intrinsic::x86_avx512_pslli_q_512:
3126     case Intrinsic::x86_avx512_psrl_w_512:
3127     case Intrinsic::x86_avx512_psrl_d_512:
3128     case Intrinsic::x86_avx512_psrl_q_512:
3129     case Intrinsic::x86_avx512_psra_w_512:
3130     case Intrinsic::x86_avx512_psra_d_512:
3131     case Intrinsic::x86_avx512_psra_q_512:
3132     case Intrinsic::x86_avx512_psrli_w_512:
3133     case Intrinsic::x86_avx512_psrli_d_512:
3134     case Intrinsic::x86_avx512_psrli_q_512:
3135     case Intrinsic::x86_avx512_psrai_w_512:
3136     case Intrinsic::x86_avx512_psrai_d_512:
3137     case Intrinsic::x86_avx512_psrai_q_512:
3138     case Intrinsic::x86_avx512_psra_q_256:
3139     case Intrinsic::x86_avx512_psra_q_128:
3140     case Intrinsic::x86_avx512_psrai_q_256:
3141     case Intrinsic::x86_avx512_psrai_q_128:
3142     case Intrinsic::x86_avx2_psll_w:
3143     case Intrinsic::x86_avx2_psll_d:
3144     case Intrinsic::x86_avx2_psll_q:
3145     case Intrinsic::x86_avx2_pslli_w:
3146     case Intrinsic::x86_avx2_pslli_d:
3147     case Intrinsic::x86_avx2_pslli_q:
3148     case Intrinsic::x86_avx2_psrl_w:
3149     case Intrinsic::x86_avx2_psrl_d:
3150     case Intrinsic::x86_avx2_psrl_q:
3151     case Intrinsic::x86_avx2_psra_w:
3152     case Intrinsic::x86_avx2_psra_d:
3153     case Intrinsic::x86_avx2_psrli_w:
3154     case Intrinsic::x86_avx2_psrli_d:
3155     case Intrinsic::x86_avx2_psrli_q:
3156     case Intrinsic::x86_avx2_psrai_w:
3157     case Intrinsic::x86_avx2_psrai_d:
3158     case Intrinsic::x86_sse2_psll_w:
3159     case Intrinsic::x86_sse2_psll_d:
3160     case Intrinsic::x86_sse2_psll_q:
3161     case Intrinsic::x86_sse2_pslli_w:
3162     case Intrinsic::x86_sse2_pslli_d:
3163     case Intrinsic::x86_sse2_pslli_q:
3164     case Intrinsic::x86_sse2_psrl_w:
3165     case Intrinsic::x86_sse2_psrl_d:
3166     case Intrinsic::x86_sse2_psrl_q:
3167     case Intrinsic::x86_sse2_psra_w:
3168     case Intrinsic::x86_sse2_psra_d:
3169     case Intrinsic::x86_sse2_psrli_w:
3170     case Intrinsic::x86_sse2_psrli_d:
3171     case Intrinsic::x86_sse2_psrli_q:
3172     case Intrinsic::x86_sse2_psrai_w:
3173     case Intrinsic::x86_sse2_psrai_d:
3174     case Intrinsic::x86_mmx_psll_w:
3175     case Intrinsic::x86_mmx_psll_d:
3176     case Intrinsic::x86_mmx_psll_q:
3177     case Intrinsic::x86_mmx_pslli_w:
3178     case Intrinsic::x86_mmx_pslli_d:
3179     case Intrinsic::x86_mmx_pslli_q:
3180     case Intrinsic::x86_mmx_psrl_w:
3181     case Intrinsic::x86_mmx_psrl_d:
3182     case Intrinsic::x86_mmx_psrl_q:
3183     case Intrinsic::x86_mmx_psra_w:
3184     case Intrinsic::x86_mmx_psra_d:
3185     case Intrinsic::x86_mmx_psrli_w:
3186     case Intrinsic::x86_mmx_psrli_d:
3187     case Intrinsic::x86_mmx_psrli_q:
3188     case Intrinsic::x86_mmx_psrai_w:
3189     case Intrinsic::x86_mmx_psrai_d:
3190       handleVectorShiftIntrinsic(I, /* Variable */ false);
3191       break;
3192     case Intrinsic::x86_avx2_psllv_d:
3193     case Intrinsic::x86_avx2_psllv_d_256:
3194     case Intrinsic::x86_avx512_psllv_d_512:
3195     case Intrinsic::x86_avx2_psllv_q:
3196     case Intrinsic::x86_avx2_psllv_q_256:
3197     case Intrinsic::x86_avx512_psllv_q_512:
3198     case Intrinsic::x86_avx2_psrlv_d:
3199     case Intrinsic::x86_avx2_psrlv_d_256:
3200     case Intrinsic::x86_avx512_psrlv_d_512:
3201     case Intrinsic::x86_avx2_psrlv_q:
3202     case Intrinsic::x86_avx2_psrlv_q_256:
3203     case Intrinsic::x86_avx512_psrlv_q_512:
3204     case Intrinsic::x86_avx2_psrav_d:
3205     case Intrinsic::x86_avx2_psrav_d_256:
3206     case Intrinsic::x86_avx512_psrav_d_512:
3207     case Intrinsic::x86_avx512_psrav_q_128:
3208     case Intrinsic::x86_avx512_psrav_q_256:
3209     case Intrinsic::x86_avx512_psrav_q_512:
3210       handleVectorShiftIntrinsic(I, /* Variable */ true);
3211       break;
3212 
3213     case Intrinsic::x86_sse2_packsswb_128:
3214     case Intrinsic::x86_sse2_packssdw_128:
3215     case Intrinsic::x86_sse2_packuswb_128:
3216     case Intrinsic::x86_sse41_packusdw:
3217     case Intrinsic::x86_avx2_packsswb:
3218     case Intrinsic::x86_avx2_packssdw:
3219     case Intrinsic::x86_avx2_packuswb:
3220     case Intrinsic::x86_avx2_packusdw:
3221       handleVectorPackIntrinsic(I);
3222       break;
3223 
3224     case Intrinsic::x86_mmx_packsswb:
3225     case Intrinsic::x86_mmx_packuswb:
3226       handleVectorPackIntrinsic(I, 16);
3227       break;
3228 
3229     case Intrinsic::x86_mmx_packssdw:
3230       handleVectorPackIntrinsic(I, 32);
3231       break;
3232 
3233     case Intrinsic::x86_mmx_psad_bw:
3234     case Intrinsic::x86_sse2_psad_bw:
3235     case Intrinsic::x86_avx2_psad_bw:
3236       handleVectorSadIntrinsic(I);
3237       break;
3238 
3239     case Intrinsic::x86_sse2_pmadd_wd:
3240     case Intrinsic::x86_avx2_pmadd_wd:
3241     case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
3242     case Intrinsic::x86_avx2_pmadd_ub_sw:
3243       handleVectorPmaddIntrinsic(I);
3244       break;
3245 
3246     case Intrinsic::x86_ssse3_pmadd_ub_sw:
3247       handleVectorPmaddIntrinsic(I, 8);
3248       break;
3249 
3250     case Intrinsic::x86_mmx_pmadd_wd:
3251       handleVectorPmaddIntrinsic(I, 16);
3252       break;
3253 
3254     case Intrinsic::x86_sse_cmp_ss:
3255     case Intrinsic::x86_sse2_cmp_sd:
3256     case Intrinsic::x86_sse_comieq_ss:
3257     case Intrinsic::x86_sse_comilt_ss:
3258     case Intrinsic::x86_sse_comile_ss:
3259     case Intrinsic::x86_sse_comigt_ss:
3260     case Intrinsic::x86_sse_comige_ss:
3261     case Intrinsic::x86_sse_comineq_ss:
3262     case Intrinsic::x86_sse_ucomieq_ss:
3263     case Intrinsic::x86_sse_ucomilt_ss:
3264     case Intrinsic::x86_sse_ucomile_ss:
3265     case Intrinsic::x86_sse_ucomigt_ss:
3266     case Intrinsic::x86_sse_ucomige_ss:
3267     case Intrinsic::x86_sse_ucomineq_ss:
3268     case Intrinsic::x86_sse2_comieq_sd:
3269     case Intrinsic::x86_sse2_comilt_sd:
3270     case Intrinsic::x86_sse2_comile_sd:
3271     case Intrinsic::x86_sse2_comigt_sd:
3272     case Intrinsic::x86_sse2_comige_sd:
3273     case Intrinsic::x86_sse2_comineq_sd:
3274     case Intrinsic::x86_sse2_ucomieq_sd:
3275     case Intrinsic::x86_sse2_ucomilt_sd:
3276     case Intrinsic::x86_sse2_ucomile_sd:
3277     case Intrinsic::x86_sse2_ucomigt_sd:
3278     case Intrinsic::x86_sse2_ucomige_sd:
3279     case Intrinsic::x86_sse2_ucomineq_sd:
3280       handleVectorCompareScalarIntrinsic(I);
3281       break;
3282 
3283     case Intrinsic::x86_sse_cmp_ps:
3284     case Intrinsic::x86_sse2_cmp_pd:
3285       // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
      // generates reasonable-looking IR that fails in the backend with "Do not
3287       // know how to split the result of this operator!".
3288       handleVectorComparePackedIntrinsic(I);
3289       break;
3290 
3291     case Intrinsic::x86_bmi_bextr_32:
3292     case Intrinsic::x86_bmi_bextr_64:
3293     case Intrinsic::x86_bmi_bzhi_32:
3294     case Intrinsic::x86_bmi_bzhi_64:
3295     case Intrinsic::x86_bmi_pdep_32:
3296     case Intrinsic::x86_bmi_pdep_64:
3297     case Intrinsic::x86_bmi_pext_32:
3298     case Intrinsic::x86_bmi_pext_64:
3299       handleBmiIntrinsic(I);
3300       break;
3301 
3302     case Intrinsic::x86_pclmulqdq:
3303     case Intrinsic::x86_pclmulqdq_256:
3304     case Intrinsic::x86_pclmulqdq_512:
3305       handlePclmulIntrinsic(I);
3306       break;
3307 
3308     case Intrinsic::is_constant:
3309       // The result of llvm.is.constant() is always defined.
3310       setShadow(&I, getCleanShadow(&I));
3311       setOrigin(&I, getCleanOrigin());
3312       break;
3313 
3314     default:
3315       if (!handleUnknownIntrinsic(I))
3316         visitInstruction(I);
3317       break;
3318     }
3319   }
3320 
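  // Calls and invokes: copy each argument's shadow (and, when tracking
  // origins, its origin) into the parameter TLS area at 8-byte-aligned
  // offsets, clear the retval TLS slot before the call, and read the return
  // value's shadow back from it right after the call returns.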
3321   void visitCallBase(CallBase &CB) {
3322     assert(!CB.getMetadata("nosanitize"));
3323     if (CB.isInlineAsm()) {
      // For inline asm (either a call to an asm function or a callbr
      // instruction), do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that
      // are not immediately visible in its constraints are not handled.
3328       if (ClHandleAsmConservative && MS.CompileKernel)
3329         visitAsmInstruction(CB);
3330       else
3331         visitInstruction(CB);
3332       return;
3333     }
3334     if (auto *Call = dyn_cast<CallInst>(&CB)) {
3335       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
3336 
3337       // We are going to insert code that relies on the fact that the callee
3338       // will become a non-readonly function after it is instrumented by us. To
3339       // prevent this code from being optimized out, mark that function
3340       // non-readonly in advance.
3341       if (Function *Func = Call->getCalledFunction()) {
3342         // Clear out readonly/readnone attributes.
3343         AttrBuilder B;
3344         B.addAttribute(Attribute::ReadOnly)
3345             .addAttribute(Attribute::ReadNone)
3346             .addAttribute(Attribute::WriteOnly)
3347             .addAttribute(Attribute::ArgMemOnly)
3348             .addAttribute(Attribute::Speculatable);
3349         Func->removeAttributes(AttributeList::FunctionIndex, B);
3350       }
3351 
3352       maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
3353     }
3354     IRBuilder<> IRB(&CB);
3355 
3356     unsigned ArgOffset = 0;
3357     LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
3358     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
3359          ++ArgIt) {
3360       Value *A = *ArgIt;
3361       unsigned i = ArgIt - CB.arg_begin();
3362       if (!A->getType()->isSized()) {
3363         LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
3364         continue;
3365       }
3366       unsigned Size = 0;
3367       Value *Store = nullptr;
3368       // Compute the Shadow for arg even if it is ByVal, because
3369       // in that case getShadow() will copy the actual arg shadow to
3370       // __msan_param_tls.
3371       Value *ArgShadow = getShadow(A);
3372       Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
3373       LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
3374                         << " Shadow: " << *ArgShadow << "\n");
3375       bool ArgIsInitialized = false;
3376       const DataLayout &DL = F.getParent()->getDataLayout();
3377       if (CB.paramHasAttr(i, Attribute::ByVal)) {
3378         assert(A->getType()->isPointerTy() &&
3379                "ByVal argument is not a pointer!");
3380         Size = DL.getTypeAllocSize(CB.getParamByValType(i));
3381         if (ArgOffset + Size > kParamTLSSize) break;
3382         const MaybeAlign ParamAlignment(CB.getParamAlign(i));
3383         MaybeAlign Alignment = llvm::None;
3384         if (ParamAlignment)
3385           Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
3386         Value *AShadowPtr =
3387             getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
3388                                /*isStore*/ false)
3389                 .first;
3390 
3391         Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
3392                                  Alignment, Size);
3393         // TODO(glider): need to copy origins.
3394       } else {
3395         Size = DL.getTypeAllocSize(A->getType());
3396         if (ArgOffset + Size > kParamTLSSize) break;
3397         Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
3398                                        kShadowTLSAlignment);
3399         Constant *Cst = dyn_cast<Constant>(ArgShadow);
3400         if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
3401       }
3402       if (MS.TrackOrigins && !ArgIsInitialized)
3403         IRB.CreateStore(getOrigin(A),
3404                         getOriginPtrForArgument(A, IRB, ArgOffset));
3405       (void)Store;
3406       assert(Size != 0 && Store != nullptr);
3407       LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
3408       ArgOffset += alignTo(Size, 8);
3409     }
3410     LLVM_DEBUG(dbgs() << "  done with call args\n");
3411 
3412     FunctionType *FT = CB.getFunctionType();
3413     if (FT->isVarArg()) {
3414       VAHelper->visitCallBase(CB, IRB);
3415     }
3416 
3417     // Now, get the shadow for the RetVal.
3418     if (!CB.getType()->isSized())
3419       return;
3420     // Don't emit the epilogue for musttail call returns.
3421     if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
3422       return;
3423     IRBuilder<> IRBBefore(&CB);
3424     // Until we have full dynamic coverage, make sure the retval shadow is 0.
3425     Value *Base = getShadowPtrForRetval(&CB, IRBBefore);
3426     IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
3427                                  kShadowTLSAlignment);
3428     BasicBlock::iterator NextInsn;
3429     if (isa<CallInst>(CB)) {
3430       NextInsn = ++CB.getIterator();
3431       assert(NextInsn != CB.getParent()->end());
3432     } else {
3433       BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
3434       if (!NormalDest->getSinglePredecessor()) {
3435         // FIXME: this case is tricky, so we are just conservative here.
3436         // Perhaps we need to split the edge between this BB and NormalDest,
3437         // but a naive attempt to use SplitEdge leads to a crash.
3438         setShadow(&CB, getCleanShadow(&CB));
3439         setOrigin(&CB, getCleanOrigin());
3440         return;
3441       }
      // FIXME: NextInsn is likely in a basic block that has not been visited
      // yet. Anything inserted there will be instrumented by MSan later!
3444       NextInsn = NormalDest->getFirstInsertionPt();
3445       assert(NextInsn != NormalDest->end() &&
3446              "Could not find insertion point for retval shadow load");
3447     }
3448     IRBuilder<> IRBAfter(&*NextInsn);
3449     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
3450         getShadowTy(&CB), getShadowPtrForRetval(&CB, IRBAfter),
3451         kShadowTLSAlignment, "_msret");
3452     setShadow(&CB, RetvalShadow);
3453     if (MS.TrackOrigins)
3454       setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
3455                                          getOriginPtrForRetval(IRBAfter)));
3456   }
3457 
3458   bool isAMustTailRetVal(Value *RetVal) {
3459     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
3460       RetVal = I->getOperand(0);
3461     }
3462     if (auto *I = dyn_cast<CallInst>(RetVal)) {
3463       return I->isMustTailCall();
3464     }
3465     return false;
3466   }
3467 
3468   void visitReturnInst(ReturnInst &I) {
3469     IRBuilder<> IRB(&I);
3470     Value *RetVal = I.getReturnValue();
3471     if (!RetVal) return;
3472     // Don't emit the epilogue for musttail call returns.
3473     if (isAMustTailRetVal(RetVal)) return;
3474     Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
3475     if (CheckReturnValue) {
3476       insertShadowCheck(RetVal, &I);
3477       Value *Shadow = getCleanShadow(RetVal);
3478       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
3479     } else {
3480       Value *Shadow = getShadow(RetVal);
3481       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
3482       if (MS.TrackOrigins)
3483         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
3484     }
3485   }
3486 
3487   void visitPHINode(PHINode &I) {
3488     IRBuilder<> IRB(&I);
3489     if (!PropagateShadow) {
3490       setShadow(&I, getCleanShadow(&I));
3491       setOrigin(&I, getCleanOrigin());
3492       return;
3493     }
3494 
3495     ShadowPHINodes.push_back(&I);
3496     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
3497                                 "_msphi_s"));
3498     if (MS.TrackOrigins)
3499       setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
3500                                   "_msphi_o"));
3501   }
3502 
3503   Value *getLocalVarDescription(AllocaInst &I) {
3504     SmallString<2048> StackDescriptionStorage;
3505     raw_svector_ostream StackDescription(StackDescriptionStorage);
3506     // We create a string with a description of the stack allocation and
3507     // pass it into __msan_set_alloca_origin.
3508     // It will be printed by the run-time if stack-originated UMR is found.
    // The first 4 bytes of the string are set to '----' and will be replaced
    // by the origin id assigned by the runtime on the first call to
    // __msan_set_alloca_origin.
3511     StackDescription << "----" << I.getName() << "@" << F.getName();
3512     return createPrivateNonConstGlobalForString(*F.getParent(),
3513                                                 StackDescription.str());
3514   }
3515 
3516   void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3517     if (PoisonStack && ClPoisonStackWithCall) {
3518       IRB.CreateCall(MS.MsanPoisonStackFn,
3519                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3520     } else {
3521       Value *ShadowBase, *OriginBase;
3522       std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
3523           &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
3524 
3525       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
3526       IRB.CreateMemSet(ShadowBase, PoisonValue, Len,
3527                        MaybeAlign(I.getAlignment()));
3528     }
3529 
3530     if (PoisonStack && MS.TrackOrigins) {
3531       Value *Descr = getLocalVarDescription(I);
3532       IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
3533                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3534                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
3535                       IRB.CreatePointerCast(&F, MS.IntptrTy)});
3536     }
3537   }
3538 
3539   void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3540     Value *Descr = getLocalVarDescription(I);
3541     if (PoisonStack) {
3542       IRB.CreateCall(MS.MsanPoisonAllocaFn,
3543                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3544                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
3545     } else {
3546       IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
3547                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3548     }
3549   }
3550 
3551   void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
3552     if (!InsPoint)
3553       InsPoint = &I;
3554     IRBuilder<> IRB(InsPoint->getNextNode());
3555     const DataLayout &DL = F.getParent()->getDataLayout();
3556     uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
3557     Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
3558     if (I.isArrayAllocation())
3559       Len = IRB.CreateMul(Len, I.getArraySize());
3560 
3561     if (MS.CompileKernel)
3562       poisonAllocaKmsan(I, IRB, Len);
3563     else
3564       poisonAllocaUserspace(I, IRB, Len);
3565   }
3566 
3567   void visitAllocaInst(AllocaInst &I) {
3568     setShadow(&I, getCleanShadow(&I));
3569     setOrigin(&I, getCleanOrigin());
3570     // We'll get to this alloca later unless it's poisoned at the corresponding
3571     // llvm.lifetime.start.
3572     AllocaSet.insert(&I);
3573   }
3574 
3575   void visitSelectInst(SelectInst& I) {
3576     IRBuilder<> IRB(&I);
3577     // a = select b, c, d
3578     Value *B = I.getCondition();
3579     Value *C = I.getTrueValue();
3580     Value *D = I.getFalseValue();
3581     Value *Sb = getShadow(B);
3582     Value *Sc = getShadow(C);
3583     Value *Sd = getShadow(D);
3584 
3585     // Result shadow if condition shadow is 0.
3586     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
3587     Value *Sa1;
3588     if (I.getType()->isAggregateType()) {
3589       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
3590       // an extra "select". This results in much more compact IR.
3591       // Sa = select Sb, poisoned, (select b, Sc, Sd)
3592       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
3593     } else {
3594       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
3595       // If Sb (condition is poisoned), look for bits in c and d that are equal
3596       // and both unpoisoned.
3597       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
3598 
3599       // Cast arguments to shadow-compatible type.
3600       C = CreateAppToShadowCast(IRB, C);
3601       D = CreateAppToShadowCast(IRB, D);
3602 
3603       // Result shadow if condition shadow is 1.
3604       Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
3605     }
3606     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
3607     setShadow(&I, Sa);
3608     if (MS.TrackOrigins) {
3609       // Origins are always i32, so any vector conditions must be flattened.
3610       // FIXME: consider tracking vector origins for app vectors?
3611       if (B->getType()->isVectorTy()) {
3612         Type *FlatTy = getShadowTyNoVec(B->getType());
3613         B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
3614                                 ConstantInt::getNullValue(FlatTy));
3615         Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
3616                                       ConstantInt::getNullValue(FlatTy));
3617       }
3618       // a = select b, c, d
3619       // Oa = Sb ? Ob : (b ? Oc : Od)
3620       setOrigin(
3621           &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
3622                                IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
3623                                                 getOrigin(I.getFalseValue()))));
3624     }
3625   }
3626 
3627   void visitLandingPadInst(LandingPadInst &I) {
3628     // Do nothing.
3629     // See https://github.com/google/sanitizers/issues/504
3630     setShadow(&I, getCleanShadow(&I));
3631     setOrigin(&I, getCleanOrigin());
3632   }
3633 
3634   void visitCatchSwitchInst(CatchSwitchInst &I) {
3635     setShadow(&I, getCleanShadow(&I));
3636     setOrigin(&I, getCleanOrigin());
3637   }
3638 
3639   void visitFuncletPadInst(FuncletPadInst &I) {
3640     setShadow(&I, getCleanShadow(&I));
3641     setOrigin(&I, getCleanOrigin());
3642   }
3643 
3644   void visitGetElementPtrInst(GetElementPtrInst &I) {
3645     handleShadowOr(I);
3646   }
3647 
3648   void visitExtractValueInst(ExtractValueInst &I) {
3649     IRBuilder<> IRB(&I);
3650     Value *Agg = I.getAggregateOperand();
3651     LLVM_DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
3652     Value *AggShadow = getShadow(Agg);
3653     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
3654     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
3655     LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
3656     setShadow(&I, ResShadow);
3657     setOriginForNaryOp(I);
3658   }
3659 
3660   void visitInsertValueInst(InsertValueInst &I) {
3661     IRBuilder<> IRB(&I);
3662     LLVM_DEBUG(dbgs() << "InsertValue:  " << I << "\n");
3663     Value *AggShadow = getShadow(I.getAggregateOperand());
3664     Value *InsShadow = getShadow(I.getInsertedValueOperand());
3665     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
3666     LLVM_DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
3667     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
3668     LLVM_DEBUG(dbgs() << "   Res:        " << *Res << "\n");
3669     setShadow(&I, Res);
3670     setOriginForNaryOp(I);
3671   }
3672 
3673   void dumpInst(Instruction &I) {
3674     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
3675       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
3676     } else {
3677       errs() << "ZZZ " << I.getOpcodeName() << "\n";
3678     }
3679     errs() << "QQQ " << I << "\n";
3680   }
3681 
3682   void visitResumeInst(ResumeInst &I) {
3683     LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
3684     // Nothing to do here.
3685   }
3686 
3687   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
3688     LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
3689     // Nothing to do here.
3690   }
3691 
3692   void visitCatchReturnInst(CatchReturnInst &CRI) {
3693     LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
3694     // Nothing to do here.
3695   }
3696 
3697   void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
3698                              const DataLayout &DL, bool isOutput) {
3699     // For each assembly argument, we check its value for being initialized.
3700     // If the argument is a pointer, we assume it points to a single element
    // of the corresponding type (or to an 8-byte word, if the type is unsized).
3702     // Each such pointer is instrumented with a call to the runtime library.
3703     Type *OpType = Operand->getType();
3704     // Check the operand value itself.
3705     insertShadowCheck(Operand, &I);
3706     if (!OpType->isPointerTy() || !isOutput) {
3707       assert(!isOutput);
3708       return;
3709     }
3710     Type *ElType = OpType->getPointerElementType();
3711     if (!ElType->isSized())
3712       return;
3713     int Size = DL.getTypeStoreSize(ElType);
3714     Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
3715     Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
3716     IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
3717   }
3718 
3719   /// Get the number of output arguments returned by pointers.
3720   int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
3721     int NumRetOutputs = 0;
3722     int NumOutputs = 0;
3723     Type *RetTy = cast<Value>(CB)->getType();
3724     if (!RetTy->isVoidTy()) {
3725       // Register outputs are returned via the CallInst return value.
3726       auto *ST = dyn_cast<StructType>(RetTy);
3727       if (ST)
3728         NumRetOutputs = ST->getNumElements();
3729       else
3730         NumRetOutputs = 1;
3731     }
3732     InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
3733     for (size_t i = 0, n = Constraints.size(); i < n; i++) {
3734       InlineAsm::ConstraintInfo Info = Constraints[i];
3735       switch (Info.Type) {
3736       case InlineAsm::isOutput:
3737         NumOutputs++;
3738         break;
3739       default:
3740         break;
3741       }
3742     }
3743     return NumOutputs - NumRetOutputs;
3744   }
3745 
3746   void visitAsmInstruction(Instruction &I) {
3747     // Conservative inline assembly handling: check for poisoned shadow of
3748     // asm() arguments, then unpoison the result and all the memory locations
3749     // pointed to by those arguments.
3750     // An inline asm() statement in C++ contains lists of input and output
3751     // arguments used by the assembly code. These are mapped to operands of the
3752     // CallInst as follows:
    //  - nR register outputs ("=r") are returned by value in a single structure
3754     //  (SSA value of the CallInst);
3755     //  - nO other outputs ("=m" and others) are returned by pointer as first
3756     // nO operands of the CallInst;
3757     //  - nI inputs ("r", "m" and others) are passed to CallInst as the
3758     // remaining nI operands.
3759     // The total number of asm() arguments in the source is nR+nO+nI, and the
3760     // corresponding CallInst has nO+nI+1 operands (the last operand is the
3761     // function to be called).
3762     const DataLayout &DL = F.getParent()->getDataLayout();
3763     CallBase *CB = cast<CallBase>(&I);
3764     IRBuilder<> IRB(&I);
3765     InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
3766     int OutputArgs = getNumOutputArgs(IA, CB);
3767     // The last operand of a CallInst is the function itself.
3768     int NumOperands = CB->getNumOperands() - 1;
3769 
3770     // Check input arguments. Doing so before unpoisoning output arguments, so
3771     // that we won't overwrite uninit values before checking them.
3772     for (int i = OutputArgs; i < NumOperands; i++) {
3773       Value *Operand = CB->getOperand(i);
3774       instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
3775     }
3776     // Unpoison output arguments. This must happen before the actual InlineAsm
3777     // call, so that the shadow for memory published in the asm() statement
3778     // remains valid.
3779     for (int i = 0; i < OutputArgs; i++) {
3780       Value *Operand = CB->getOperand(i);
3781       instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
3782     }
3783 
3784     setShadow(&I, getCleanShadow(&I));
3785     setOrigin(&I, getCleanOrigin());
3786   }
3787 
3788   void visitInstruction(Instruction &I) {
3789     // Everything else: stop propagating and check for poisoned shadow.
3790     if (ClDumpStrictInstructions)
3791       dumpInst(I);
3792     LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
3793     for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
3794       Value *Operand = I.getOperand(i);
3795       if (Operand->getType()->isSized())
3796         insertShadowCheck(Operand, &I);
3797     }
3798     setShadow(&I, getCleanShadow(&I));
3799     setOrigin(&I, getCleanOrigin());
3800   }
3801 };
3802 
3803 /// AMD64-specific implementation of VarArgHelper.
3804 struct VarArgAMD64Helper : public VarArgHelper {
3805   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
3806   // See a comment in visitCallBase for more details.
3807   static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
3808   static const unsigned AMD64FpEndOffsetSSE = 176;
3809   // If SSE is disabled, fp_offset in va_list is zero.
3810   static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
3811 
3812   unsigned AMD64FpEndOffset;
3813   Function &F;
3814   MemorySanitizer &MS;
3815   MemorySanitizerVisitor &MSV;
3816   Value *VAArgTLSCopy = nullptr;
3817   Value *VAArgTLSOriginCopy = nullptr;
3818   Value *VAArgOverflowSize = nullptr;
3819 
3820   SmallVector<CallInst*, 16> VAStartInstrumentationList;
3821 
3822   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3823 
3824   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
3825                     MemorySanitizerVisitor &MSV)
3826       : F(F), MS(MS), MSV(MSV) {
3827     AMD64FpEndOffset = AMD64FpEndOffsetSSE;
3828     for (const auto &Attr : F.getAttributes().getFnAttributes()) {
3829       if (Attr.isStringAttribute() &&
3830           (Attr.getKindAsString() == "target-features")) {
3831         if (Attr.getValueAsString().contains("-sse"))
3832           AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
3833         break;
3834       }
3835     }
3836   }
3837 
3838   ArgKind classifyArgument(Value* arg) {
3839     // A very rough approximation of X86_64 argument classification rules.
3840     Type *T = arg->getType();
3841     if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
3842       return AK_FloatingPoint;
3843     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3844       return AK_GeneralPurpose;
3845     if (T->isPointerTy())
3846       return AK_GeneralPurpose;
3847     return AK_Memory;
3848   }
3849 
3850   // For VarArg functions, store the argument shadow in an ABI-specific format
3851   // that corresponds to va_list layout.
3852   // We do this because Clang lowers va_arg in the frontend, and this pass
3853   // only sees the low level code that deals with va_list internals.
3854   // A much easier alternative (provided that Clang emits va_arg instructions)
3855   // would have been to associate each live instance of va_list with a copy of
3856   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
3857   // order.
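  //
  // The shadow copy of the register save area mirrors the va_list layout:
  // general-purpose register slots (8 bytes each) occupy
  // [0, AMD64GpEndOffset), FP/SSE slots (16 bytes each) occupy
  // [AMD64GpEndOffset, AMD64FpEndOffset), and memory (overflow) arguments
  // follow, padded to 8 bytes.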
3858   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
3859     unsigned GpOffset = 0;
3860     unsigned FpOffset = AMD64GpEndOffset;
3861     unsigned OverflowOffset = AMD64FpEndOffset;
3862     const DataLayout &DL = F.getParent()->getDataLayout();
3863     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
3864          ++ArgIt) {
3865       Value *A = *ArgIt;
3866       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
3867       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
3868       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
3869       if (IsByVal) {
3870         // ByVal arguments always go to the overflow area.
3871         // Fixed arguments passed through the overflow area will be stepped
3872         // over by va_start, so don't count them towards the offset.
3873         if (IsFixed)
3874           continue;
3875         assert(A->getType()->isPointerTy());
3876         Type *RealTy = CB.getParamByValType(ArgNo);
3877         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3878         Value *ShadowBase = getShadowPtrForVAArgument(
3879             RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
3880         Value *OriginBase = nullptr;
3881         if (MS.TrackOrigins)
3882           OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
3883         OverflowOffset += alignTo(ArgSize, 8);
3884         if (!ShadowBase)
3885           continue;
3886         Value *ShadowPtr, *OriginPtr;
3887         std::tie(ShadowPtr, OriginPtr) =
3888             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
3889                                    /*isStore*/ false);
3890 
3891         IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
3892                          kShadowTLSAlignment, ArgSize);
3893         if (MS.TrackOrigins)
3894           IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
3895                            kShadowTLSAlignment, ArgSize);
3896       } else {
3897         ArgKind AK = classifyArgument(A);
3898         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
3899           AK = AK_Memory;
3900         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
3901           AK = AK_Memory;
3902         Value *ShadowBase, *OriginBase = nullptr;
3903         switch (AK) {
3904           case AK_GeneralPurpose:
3905             ShadowBase =
3906                 getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
3907             if (MS.TrackOrigins)
3908               OriginBase =
3909                   getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
3910             GpOffset += 8;
3911             break;
3912           case AK_FloatingPoint:
3913             ShadowBase =
3914                 getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
3915             if (MS.TrackOrigins)
3916               OriginBase =
3917                   getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
3918             FpOffset += 16;
3919             break;
3920           case AK_Memory:
3921             if (IsFixed)
3922               continue;
3923             uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3924             ShadowBase =
3925                 getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
3926             if (MS.TrackOrigins)
3927               OriginBase =
3928                   getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3929             OverflowOffset += alignTo(ArgSize, 8);
3930         }
3931         // Take fixed arguments into account for GpOffset and FpOffset,
3932         // but don't actually store shadows for them.
3933         // TODO(glider): don't call get*PtrForVAArgument() for them.
3934         if (IsFixed)
3935           continue;
3936         if (!ShadowBase)
3937           continue;
3938         Value *Shadow = MSV.getShadow(A);
3939         IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
3940         if (MS.TrackOrigins) {
3941           Value *Origin = MSV.getOrigin(A);
3942           unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
3943           MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
3944                           std::max(kShadowTLSAlignment, kMinOriginAlignment));
3945         }
3946       }
3947     }
3948     Constant *OverflowSize =
3949       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
3950     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3951   }
3952 
3953   /// Compute the shadow address for a given va_arg.
3954   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3955                                    unsigned ArgOffset, unsigned ArgSize) {
3956     // Make sure we don't overflow __msan_va_arg_tls.
3957     if (ArgOffset + ArgSize > kParamTLSSize)
3958       return nullptr;
3959     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3960     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3961     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3962                               "_msarg_va_s");
3963   }
3964 
3965   /// Compute the origin address for a given va_arg.
3966   Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
3967     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
3968     // getOriginPtrForVAArgument() is always called after
3969     // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
3970     // overflow.
3971     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3972     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
3973                               "_msarg_va_o");
3974   }
3975 
3976   void unpoisonVAListTagForInst(IntrinsicInst &I) {
3977     IRBuilder<> IRB(&I);
3978     Value *VAListTag = I.getArgOperand(0);
3979     Value *ShadowPtr, *OriginPtr;
3980     const Align Alignment = Align(8);
3981     std::tie(ShadowPtr, OriginPtr) =
3982         MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
3983                                /*isStore*/ true);
3984 
3985     // Unpoison the whole __va_list_tag.
3986     // FIXME: magic ABI constants.
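    // For reference, the System V AMD64 ABI lays out __va_list_tag as:
    //   unsigned gp_offset;       // offset 0
    //   unsigned fp_offset;       // offset 4
    //   void *overflow_arg_area;  // offset 8
    //   void *reg_save_area;      // offset 16
    // for a total of 24 bytes, which is the size unpoisoned below.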
3987     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3988                      /* size */ 24, Alignment, false);
3989     // We shouldn't need to zero out the origins, as they're only checked for
3990     // nonzero shadow.
3991   }
3992 
3993   void visitVAStartInst(VAStartInst &I) override {
3994     if (F.getCallingConv() == CallingConv::Win64)
3995       return;
3996     VAStartInstrumentationList.push_back(&I);
3997     unpoisonVAListTagForInst(I);
3998   }
3999 
4000   void visitVACopyInst(VACopyInst &I) override {
4001     if (F.getCallingConv() == CallingConv::Win64) return;
4002     unpoisonVAListTagForInst(I);
4003   }
4004 
4005   void finalizeInstrumentation() override {
4006     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4007            "finalizeInstrumentation called twice");
4008     if (!VAStartInstrumentationList.empty()) {
4009       // If there is a va_start in this function, make a backup copy of
4010       // va_arg_tls somewhere in the function entry block.
4011       IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4012       VAArgOverflowSize =
4013           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4014       Value *CopySize =
4015         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
4016                       VAArgOverflowSize);
4017       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4018       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4019       if (MS.TrackOrigins) {
4020         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4021         IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
4022                          Align(8), CopySize);
4023       }
4024     }
4025 
4026     // Instrument va_start.
4027     // Copy va_list shadow from the backup copy of the TLS contents.
4028     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4029       CallInst *OrigInst = VAStartInstrumentationList[i];
4030       IRBuilder<> IRB(OrigInst->getNextNode());
4031       Value *VAListTag = OrigInst->getArgOperand(0);
4032 
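      // Within __va_list_tag (see the layout in unpoisonVAListTagForInst()),
      // reg_save_area lives at offset 16 and overflow_arg_area at offset 8;
      // the constants below index those two fields.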
4033       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4034       Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4035           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4036                         ConstantInt::get(MS.IntptrTy, 16)),
4037           PointerType::get(RegSaveAreaPtrTy, 0));
4038       Value *RegSaveAreaPtr =
4039           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4040       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4041       const Align Alignment = Align(16);
4042       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4043           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4044                                  Alignment, /*isStore*/ true);
4045       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4046                        AMD64FpEndOffset);
4047       if (MS.TrackOrigins)
4048         IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4049                          Alignment, AMD64FpEndOffset);
4050       Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4051       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4052           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4053                         ConstantInt::get(MS.IntptrTy, 8)),
4054           PointerType::get(OverflowArgAreaPtrTy, 0));
4055       Value *OverflowArgAreaPtr =
4056           IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4057       Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4058       std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4059           MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4060                                  Alignment, /*isStore*/ true);
4061       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4062                                              AMD64FpEndOffset);
4063       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4064                        VAArgOverflowSize);
4065       if (MS.TrackOrigins) {
4066         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4067                                         AMD64FpEndOffset);
4068         IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4069                          VAArgOverflowSize);
4070       }
4071     }
4072   }
4073 };
4074 
4075 /// MIPS64-specific implementation of VarArgHelper.
4076 struct VarArgMIPS64Helper : public VarArgHelper {
4077   Function &F;
4078   MemorySanitizer &MS;
4079   MemorySanitizerVisitor &MSV;
4080   Value *VAArgTLSCopy = nullptr;
4081   Value *VAArgSize = nullptr;
4082 
4083   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4084 
4085   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
4086                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4087 
4088   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4089     unsigned VAArgOffset = 0;
4090     const DataLayout &DL = F.getParent()->getDataLayout();
4091     for (auto ArgIt = CB.arg_begin() + CB.getFunctionType()->getNumParams(),
4092               End = CB.arg_end();
4093          ArgIt != End; ++ArgIt) {
4094       Triple TargetTriple(F.getParent()->getTargetTriple());
4095       Value *A = *ArgIt;
4096       Value *Base;
4097       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4098       if (TargetTriple.getArch() == Triple::mips64) {
        // Adjust the shadow offset for arguments smaller than 8 bytes to
        // match the placement of their bits on a big-endian system.
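        // For example, an i32 vararg (ArgSize == 4) occupies the high half of
        // its 8-byte slot on big-endian mips64, so the shadow offset is
        // advanced by 8 - 4 == 4 bytes to line up with the value.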
4101         if (ArgSize < 8)
4102           VAArgOffset += (8 - ArgSize);
4103       }
4104       Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
4105       VAArgOffset += ArgSize;
4106       VAArgOffset = alignTo(VAArgOffset, 8);
4107       if (!Base)
4108         continue;
4109       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4110     }
4111 
4112     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
    // Reuse VAArgOverflowSizeTLS to hold the total size of all varargs rather
    // than introducing a dedicated VAArgSizeTLS class member.
4115     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4116   }
4117 
4118   /// Compute the shadow address for a given va_arg.
4119   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4120                                    unsigned ArgOffset, unsigned ArgSize) {
4121     // Make sure we don't overflow __msan_va_arg_tls.
4122     if (ArgOffset + ArgSize > kParamTLSSize)
4123       return nullptr;
4124     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4125     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4126     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4127                               "_msarg");
4128   }
4129 
4130   void visitVAStartInst(VAStartInst &I) override {
4131     IRBuilder<> IRB(&I);
4132     VAStartInstrumentationList.push_back(&I);
4133     Value *VAListTag = I.getArgOperand(0);
4134     Value *ShadowPtr, *OriginPtr;
4135     const Align Alignment = Align(8);
4136     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4137         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4138     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4139                      /* size */ 8, Alignment, false);
4140   }
4141 
4142   void visitVACopyInst(VACopyInst &I) override {
4143     IRBuilder<> IRB(&I);
4144     VAStartInstrumentationList.push_back(&I);
4145     Value *VAListTag = I.getArgOperand(0);
4146     Value *ShadowPtr, *OriginPtr;
4147     const Align Alignment = Align(8);
4148     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4149         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4150     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4151                      /* size */ 8, Alignment, false);
4152   }
4153 
4154   void finalizeInstrumentation() override {
4155     assert(!VAArgSize && !VAArgTLSCopy &&
4156            "finalizeInstrumentation called twice");
4157     IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4158     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4159     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4160                                     VAArgSize);
4161 
4162     if (!VAStartInstrumentationList.empty()) {
4163       // If there is a va_start in this function, make a backup copy of
4164       // va_arg_tls somewhere in the function entry block.
4165       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4166       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4167     }
4168 
4169     // Instrument va_start.
4170     // Copy va_list shadow from the backup copy of the TLS contents.
4171     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4172       CallInst *OrigInst = VAStartInstrumentationList[i];
4173       IRBuilder<> IRB(OrigInst->getNextNode());
4174       Value *VAListTag = OrigInst->getArgOperand(0);
4175       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4176       Value *RegSaveAreaPtrPtr =
4177           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4178                              PointerType::get(RegSaveAreaPtrTy, 0));
4179       Value *RegSaveAreaPtr =
4180           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4181       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4182       const Align Alignment = Align(8);
4183       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4184           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4185                                  Alignment, /*isStore*/ true);
4186       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4187                        CopySize);
4188     }
4189   }
4190 };
4191 
4192 /// AArch64-specific implementation of VarArgHelper.
4193 struct VarArgAArch64Helper : public VarArgHelper {
4194   static const unsigned kAArch64GrArgSize = 64;
4195   static const unsigned kAArch64VrArgSize = 128;
4196 
4197   static const unsigned AArch64GrBegOffset = 0;
4198   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
4199   // Make VR space aligned to 16 bytes.
4200   static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
4201   static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
4202                                              + kAArch64VrArgSize;
4203   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
4204 
4205   Function &F;
4206   MemorySanitizer &MS;
4207   MemorySanitizerVisitor &MSV;
4208   Value *VAArgTLSCopy = nullptr;
4209   Value *VAArgOverflowSize = nullptr;
4210 
4211   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4212 
4213   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
4214 
4215   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
4216                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4217 
4218   ArgKind classifyArgument(Value* arg) {
4219     Type *T = arg->getType();
4220     if (T->isFPOrFPVectorTy())
4221       return AK_FloatingPoint;
4222     if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
4223         || (T->isPointerTy()))
4224       return AK_GeneralPurpose;
4225     return AK_Memory;
4226   }
4227 
  // The instrumentation stores the argument shadow in a non ABI-specific
  // format, because it does not know which arguments are named (as in the
  // x86_64 case, Clang lowers va_arg in the frontend, so this pass only sees
  // the low-level code that deals with va_list internals).
  // The first eight GR registers are saved in the first 64 bytes of the
  // va_arg TLS array, followed by the first eight FP/SIMD registers, and then
  // the remaining arguments.
  // Using constant offsets within the va_arg TLS array allows a fast copy
  // in finalizeInstrumentation().
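  // With the constants above, the resulting va_arg TLS layout is:
  //   [0, 64)    shadow of GP register arguments (x0-x7, 8 bytes each)
  //   [64, 192)  shadow of FP/SIMD register arguments (q0-q7, 16 bytes each)
  //   [192, ...) shadow of arguments passed in memory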
4237   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4238     unsigned GrOffset = AArch64GrBegOffset;
4239     unsigned VrOffset = AArch64VrBegOffset;
4240     unsigned OverflowOffset = AArch64VAEndOffset;
4241 
4242     const DataLayout &DL = F.getParent()->getDataLayout();
4243     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4244          ++ArgIt) {
4245       Value *A = *ArgIt;
4246       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4247       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4248       ArgKind AK = classifyArgument(A);
4249       if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
4250         AK = AK_Memory;
4251       if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
4252         AK = AK_Memory;
4253       Value *Base;
4254       switch (AK) {
4255         case AK_GeneralPurpose:
4256           Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
4257           GrOffset += 8;
4258           break;
4259         case AK_FloatingPoint:
4260           Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
4261           VrOffset += 16;
4262           break;
4263         case AK_Memory:
4264           // Don't count fixed arguments in the overflow area - va_start will
4265           // skip right over them.
4266           if (IsFixed)
4267             continue;
4268           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4269           Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
4270                                            alignTo(ArgSize, 8));
4271           OverflowOffset += alignTo(ArgSize, 8);
4272           break;
4273       }
4274       // Count Gp/Vr fixed arguments to their respective offsets, but don't
4275       // bother to actually store a shadow.
4276       if (IsFixed)
4277         continue;
4278       if (!Base)
4279         continue;
4280       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4281     }
4282     Constant *OverflowSize =
4283       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
4284     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4285   }
4286 
4287   /// Compute the shadow address for a given va_arg.
4288   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4289                                    unsigned ArgOffset, unsigned ArgSize) {
4290     // Make sure we don't overflow __msan_va_arg_tls.
4291     if (ArgOffset + ArgSize > kParamTLSSize)
4292       return nullptr;
4293     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4294     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4295     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4296                               "_msarg");
4297   }
4298 
4299   void visitVAStartInst(VAStartInst &I) override {
4300     IRBuilder<> IRB(&I);
4301     VAStartInstrumentationList.push_back(&I);
4302     Value *VAListTag = I.getArgOperand(0);
4303     Value *ShadowPtr, *OriginPtr;
4304     const Align Alignment = Align(8);
4305     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4306         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4307     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4308                      /* size */ 32, Alignment, false);
4309   }
4310 
4311   void visitVACopyInst(VACopyInst &I) override {
4312     IRBuilder<> IRB(&I);
4313     VAStartInstrumentationList.push_back(&I);
4314     Value *VAListTag = I.getArgOperand(0);
4315     Value *ShadowPtr, *OriginPtr;
4316     const Align Alignment = Align(8);
4317     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4318         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4319     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4320                      /* size */ 32, Alignment, false);
4321   }
4322 
4323   // Retrieve a va_list field of 'void*' size.
4324   Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
4325     Value *SaveAreaPtrPtr =
4326       IRB.CreateIntToPtr(
4327         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4328                       ConstantInt::get(MS.IntptrTy, offset)),
4329         Type::getInt64PtrTy(*MS.C));
4330     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
4331   }
4332 
4333   // Retrieve a va_list field of 'int' size.
4334   Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
4335     Value *SaveAreaPtr =
4336       IRB.CreateIntToPtr(
4337         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4338                       ConstantInt::get(MS.IntptrTy, offset)),
4339         Type::getInt32PtrTy(*MS.C));
4340     Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
4341     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
4342   }
4343 
4344   void finalizeInstrumentation() override {
4345     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4346            "finalizeInstrumentation called twice");
4347     if (!VAStartInstrumentationList.empty()) {
4348       // If there is a va_start in this function, make a backup copy of
4349       // va_arg_tls somewhere in the function entry block.
4350       IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4351       VAArgOverflowSize =
4352           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4353       Value *CopySize =
4354         IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
4355                       VAArgOverflowSize);
4356       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4357       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4358     }
4359 
4360     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
4361     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
4362 
4363     // Instrument va_start, copy va_list shadow from the backup copy of
4364     // the TLS contents.
4365     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4366       CallInst *OrigInst = VAStartInstrumentationList[i];
4367       IRBuilder<> IRB(OrigInst->getNextNode());
4368 
4369       Value *VAListTag = OrigInst->getArgOperand(0);
4370 
      // The variadic ABI for AArch64 creates two areas to save the incoming
      // argument registers (one for the 64-bit general registers x0-x7 and
      // another for the 128-bit FP/SIMD registers v0-v7).
      // We then need to propagate the shadow arguments to both regions,
      // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
      // The remaining arguments get their shadow from 'va::stack'.
      // One caveat is that only the unnamed arguments need to be propagated,
      // whereas the call-site instrumentation saved shadow for 'all' of the
      // arguments. So, when copying the shadow values from the va_arg TLS
      // array, we adjust the offsets of both the GR and VR regions by the
      // __{gr,vr}_offs values (which encode how many named arguments were
      // passed in registers).
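      // For reference, the AAPCS64 va_list is laid out as:
      //   void *__stack;   // offset 0
      //   void *__gr_top;  // offset 8
      //   void *__vr_top;  // offset 16
      //   int   __gr_offs; // offset 24
      //   int   __vr_offs; // offset 28
      // which is where the field offsets used below come from.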
4383 
4384       // Read the stack pointer from the va_list.
4385       Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
4386 
4387       // Read both the __gr_top and __gr_off and add them up.
4388       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
4389       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
4390 
4391       Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
4392 
4393       // Read both the __vr_top and __vr_off and add them up.
4394       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
4395       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
4396 
4397       Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
4398 
      // The instrumentation does not know how many arguments are named, and
      // at the call site all of the arguments were saved. Since __gr_offs is
      // defined as '0 - ((8 - named_gr) * 8)', the idea is to propagate only
      // the variadic arguments by skipping the bytes of shadow that belong to
      // the named arguments.
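      // For example, with three named GP arguments __gr_offs is -40, so the
      // source offset below is 64 + (-40) == 24 (just past the shadow of the
      // named arguments) and 64 - 24 == 40 bytes of GR shadow are copied.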
4403       Value *GrRegSaveAreaShadowPtrOff =
4404         IRB.CreateAdd(GrArgSize, GrOffSaveArea);
4405 
4406       Value *GrRegSaveAreaShadowPtr =
4407           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4408                                  Align(8), /*isStore*/ true)
4409               .first;
4410 
4411       Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4412                                               GrRegSaveAreaShadowPtrOff);
4413       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
4414 
4415       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
4416                        GrCopySize);
4417 
4418       // Again, but for FP/SIMD values.
4419       Value *VrRegSaveAreaShadowPtrOff =
4420           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
4421 
4422       Value *VrRegSaveAreaShadowPtr =
4423           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4424                                  Align(8), /*isStore*/ true)
4425               .first;
4426 
4427       Value *VrSrcPtr = IRB.CreateInBoundsGEP(
4428         IRB.getInt8Ty(),
4429         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4430                               IRB.getInt32(AArch64VrBegOffset)),
4431         VrRegSaveAreaShadowPtrOff);
4432       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
4433 
4434       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
4435                        VrCopySize);
4436 
4437       // And finally for remaining arguments.
4438       Value *StackSaveAreaShadowPtr =
4439           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
4440                                  Align(16), /*isStore*/ true)
4441               .first;
4442 
4443       Value *StackSrcPtr =
4444         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
4445                               IRB.getInt32(AArch64VAEndOffset));
4446 
4447       IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
4448                        Align(16), VAArgOverflowSize);
4449     }
4450   }
4451 };
4452 
4453 /// PowerPC64-specific implementation of VarArgHelper.
4454 struct VarArgPowerPC64Helper : public VarArgHelper {
4455   Function &F;
4456   MemorySanitizer &MS;
4457   MemorySanitizerVisitor &MSV;
4458   Value *VAArgTLSCopy = nullptr;
4459   Value *VAArgSize = nullptr;
4460 
4461   SmallVector<CallInst*, 16> VAStartInstrumentationList;
4462 
4463   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
4464                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4465 
4466   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4467     // For PowerPC, we need to deal with alignment of stack arguments -
4468     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
4469     // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
4470     // and QPX vectors are aligned to 32 bytes.  For that reason, we
4471     // compute current offset from stack pointer (which is always properly
4472     // aligned), and offset for the first vararg, then subtract them.
4473     unsigned VAArgBase;
4474     Triple TargetTriple(F.getParent()->getTargetTriple());
    // The parameter save area starts 48 bytes into the frame for ABIv1 and
    // 32 bytes into it for ABIv2. Which ABI is in use is usually determined
    // by target endianness, but in theory it could be overridden by a
    // function attribute. For simplicity, we ignore that here (it would only
    // matter for QPX vectors).
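    // (Concretely, the big-endian ELFv1 ABI reserves a 48-byte stack-frame
    // header before the parameter save area, while the little-endian ELFv2
    // ABI reserves only 32 bytes, which is where the two values below come
    // from.)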
4479     if (TargetTriple.getArch() == Triple::ppc64)
4480       VAArgBase = 48;
4481     else
4482       VAArgBase = 32;
4483     unsigned VAArgOffset = VAArgBase;
4484     const DataLayout &DL = F.getParent()->getDataLayout();
4485     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4486          ++ArgIt) {
4487       Value *A = *ArgIt;
4488       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4489       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4490       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
4491       if (IsByVal) {
4492         assert(A->getType()->isPointerTy());
4493         Type *RealTy = CB.getParamByValType(ArgNo);
4494         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
4495         MaybeAlign ArgAlign = CB.getParamAlign(ArgNo);
4496         if (!ArgAlign || *ArgAlign < Align(8))
4497           ArgAlign = Align(8);
4498         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
4499         if (!IsFixed) {
4500           Value *Base = getShadowPtrForVAArgument(
4501               RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
4502           if (Base) {
4503             Value *AShadowPtr, *AOriginPtr;
4504             std::tie(AShadowPtr, AOriginPtr) =
4505                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
4506                                        kShadowTLSAlignment, /*isStore*/ false);
4507 
4508             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
4509                              kShadowTLSAlignment, ArgSize);
4510           }
4511         }
4512         VAArgOffset += alignTo(ArgSize, 8);
4513       } else {
4514         Value *Base;
4515         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4516         uint64_t ArgAlign = 8;
4517         if (A->getType()->isArrayTy()) {
4518           // Arrays are aligned to element size, except for long double
4519           // arrays, which are aligned to 8 bytes.
4520           Type *ElementTy = A->getType()->getArrayElementType();
4521           if (!ElementTy->isPPC_FP128Ty())
4522             ArgAlign = DL.getTypeAllocSize(ElementTy);
4523         } else if (A->getType()->isVectorTy()) {
4524           // Vectors are naturally aligned.
4525           ArgAlign = DL.getTypeAllocSize(A->getType());
4526         }
4527         if (ArgAlign < 8)
4528           ArgAlign = 8;
4529         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
4530         if (DL.isBigEndian()) {
          // Adjust the shadow offset for arguments smaller than 8 bytes to
          // match the placement of their bits on a big-endian system.
4533           if (ArgSize < 8)
4534             VAArgOffset += (8 - ArgSize);
4535         }
4536         if (!IsFixed) {
4537           Base = getShadowPtrForVAArgument(A->getType(), IRB,
4538                                            VAArgOffset - VAArgBase, ArgSize);
4539           if (Base)
4540             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
4541         }
4542         VAArgOffset += ArgSize;
4543         VAArgOffset = alignTo(VAArgOffset, 8);
4544       }
4545       if (IsFixed)
4546         VAArgBase = VAArgOffset;
4547     }
4548 
4549     Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
4550                                                 VAArgOffset - VAArgBase);
    // Reuse VAArgOverflowSizeTLS to hold the total size of all varargs rather
    // than introducing a dedicated VAArgSizeTLS class member.
4553     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4554   }
4555 
4556   /// Compute the shadow address for a given va_arg.
4557   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4558                                    unsigned ArgOffset, unsigned ArgSize) {
4559     // Make sure we don't overflow __msan_va_arg_tls.
4560     if (ArgOffset + ArgSize > kParamTLSSize)
4561       return nullptr;
4562     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4563     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4564     return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4565                               "_msarg");
4566   }
4567 
4568   void visitVAStartInst(VAStartInst &I) override {
4569     IRBuilder<> IRB(&I);
4570     VAStartInstrumentationList.push_back(&I);
4571     Value *VAListTag = I.getArgOperand(0);
4572     Value *ShadowPtr, *OriginPtr;
4573     const Align Alignment = Align(8);
4574     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4575         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4576     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4577                      /* size */ 8, Alignment, false);
4578   }
4579 
4580   void visitVACopyInst(VACopyInst &I) override {
4581     IRBuilder<> IRB(&I);
4582     Value *VAListTag = I.getArgOperand(0);
4583     Value *ShadowPtr, *OriginPtr;
4584     const Align Alignment = Align(8);
4585     std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4586         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4587     // Unpoison the whole __va_list_tag.
4588     // FIXME: magic ABI constants.
4589     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4590                      /* size */ 8, Alignment, false);
4591   }
4592 
4593   void finalizeInstrumentation() override {
4594     assert(!VAArgSize && !VAArgTLSCopy &&
4595            "finalizeInstrumentation called twice");
4596     IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4597     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4598     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4599                                     VAArgSize);
4600 
4601     if (!VAStartInstrumentationList.empty()) {
4602       // If there is a va_start in this function, make a backup copy of
4603       // va_arg_tls somewhere in the function entry block.
4604       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4605       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4606     }
4607 
4608     // Instrument va_start.
4609     // Copy va_list shadow from the backup copy of the TLS contents.
4610     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4611       CallInst *OrigInst = VAStartInstrumentationList[i];
4612       IRBuilder<> IRB(OrigInst->getNextNode());
4613       Value *VAListTag = OrigInst->getArgOperand(0);
4614       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4615       Value *RegSaveAreaPtrPtr =
4616           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4617                              PointerType::get(RegSaveAreaPtrTy, 0));
4618       Value *RegSaveAreaPtr =
4619           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4620       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4621       const Align Alignment = Align(8);
4622       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4623           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4624                                  Alignment, /*isStore*/ true);
4625       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4626                        CopySize);
4627     }
4628   }
4629 };
4630 
4631 /// SystemZ-specific implementation of VarArgHelper.
4632 struct VarArgSystemZHelper : public VarArgHelper {
4633   static const unsigned SystemZGpOffset = 16;
4634   static const unsigned SystemZGpEndOffset = 56;
4635   static const unsigned SystemZFpOffset = 128;
4636   static const unsigned SystemZFpEndOffset = 160;
4637   static const unsigned SystemZMaxVrArgs = 8;
4638   static const unsigned SystemZRegSaveAreaSize = 160;
4639   static const unsigned SystemZOverflowOffset = 160;
4640   static const unsigned SystemZVAListTagSize = 32;
4641   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
4642   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
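  // For reference, these constants follow the s390x ELF ABI: the 160-byte
  // register save area holds the GPR argument registers r2-r6 at offsets
  // [16, 56) and the FPR argument registers f0, f2, f4, f6 at offsets
  // [128, 160); the 32-byte va_list stores the overflow_arg_area pointer at
  // offset 16 and the reg_save_area pointer at offset 24.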
4643 
4644   Function &F;
4645   MemorySanitizer &MS;
4646   MemorySanitizerVisitor &MSV;
4647   Value *VAArgTLSCopy = nullptr;
4648   Value *VAArgTLSOriginCopy = nullptr;
4649   Value *VAArgOverflowSize = nullptr;
4650 
4651   SmallVector<CallInst *, 16> VAStartInstrumentationList;
4652 
4653   enum class ArgKind {
4654     GeneralPurpose,
4655     FloatingPoint,
4656     Vector,
4657     Memory,
4658     Indirect,
4659   };
4660 
4661   enum class ShadowExtension { None, Zero, Sign };
4662 
4663   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
4664                       MemorySanitizerVisitor &MSV)
4665       : F(F), MS(MS), MSV(MSV) {}
4666 
4667   ArgKind classifyArgument(Type *T, bool IsSoftFloatABI) {
4668     // T is a SystemZABIInfo::classifyArgumentType() output, and there are
4669     // only a few possibilities of what it can be. In particular, enums, single
4670     // element structs and large types have already been taken care of.
4671 
4672     // Some i128 and fp128 arguments are converted to pointers only in the
4673     // back end.
4674     if (T->isIntegerTy(128) || T->isFP128Ty())
4675       return ArgKind::Indirect;
4676     if (T->isFloatingPointTy())
4677       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
4678     if (T->isIntegerTy() || T->isPointerTy())
4679       return ArgKind::GeneralPurpose;
4680     if (T->isVectorTy())
4681       return ArgKind::Vector;
4682     return ArgKind::Memory;
4683   }
4684 
4685   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
4686     // ABI says: "One of the simple integer types no more than 64 bits wide.
4687     // ... If such an argument is shorter than 64 bits, replace it by a full
4688     // 64-bit integer representing the same number, using sign or zero
4689     // extension". Shadow for an integer argument has the same type as the
4690     // argument itself, so it can be sign or zero extended as well.
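    // For example, a vararg 'signed char' passed with the SExt attribute gets
    // its 1-byte shadow sign-extended to 64 bits in visitCallBase(), so a
    // poisoned byte poisons the whole 8-byte argument slot in shadow.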
4691     bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
4692     bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
4693     if (ZExt) {
4694       assert(!SExt);
4695       return ShadowExtension::Zero;
4696     }
4697     if (SExt) {
4698       assert(!ZExt);
4699       return ShadowExtension::Sign;
4700     }
4701     return ShadowExtension::None;
4702   }
4703 
4704   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
4705     bool IsSoftFloatABI = CB.getCalledFunction()
4706                               ->getFnAttribute("use-soft-float")
4707                               .getValueAsString() == "true";
4708     unsigned GpOffset = SystemZGpOffset;
4709     unsigned FpOffset = SystemZFpOffset;
4710     unsigned VrIndex = 0;
4711     unsigned OverflowOffset = SystemZOverflowOffset;
4712     const DataLayout &DL = F.getParent()->getDataLayout();
4713     for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
4714          ++ArgIt) {
4715       Value *A = *ArgIt;
4716       unsigned ArgNo = CB.getArgOperandNo(ArgIt);
4717       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
4718       // SystemZABIInfo does not produce ByVal parameters.
4719       assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
4720       Type *T = A->getType();
4721       ArgKind AK = classifyArgument(T, IsSoftFloatABI);
4722       if (AK == ArgKind::Indirect) {
4723         T = PointerType::get(T, 0);
4724         AK = ArgKind::GeneralPurpose;
4725       }
4726       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
4727         AK = ArgKind::Memory;
4728       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
4729         AK = ArgKind::Memory;
4730       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
4731         AK = ArgKind::Memory;
4732       Value *ShadowBase = nullptr;
4733       Value *OriginBase = nullptr;
4734       ShadowExtension SE = ShadowExtension::None;
4735       switch (AK) {
4736       case ArgKind::GeneralPurpose: {
4737         // Always keep track of GpOffset, but store shadow only for varargs.
4738         uint64_t ArgSize = 8;
4739         if (GpOffset + ArgSize <= kParamTLSSize) {
4740           if (!IsFixed) {
4741             SE = getShadowExtension(CB, ArgNo);
4742             uint64_t GapSize = 0;
4743             if (SE == ShadowExtension::None) {
4744               uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
4745               assert(ArgAllocSize <= ArgSize);
4746               GapSize = ArgSize - ArgAllocSize;
4747             }
4748             ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
4749             if (MS.TrackOrigins)
4750               OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
4751           }
4752           GpOffset += ArgSize;
4753         } else {
4754           GpOffset = kParamTLSSize;
4755         }
4756         break;
4757       }
4758       case ArgKind::FloatingPoint: {
4759         // Always keep track of FpOffset, but store shadow only for varargs.
4760         uint64_t ArgSize = 8;
4761         if (FpOffset + ArgSize <= kParamTLSSize) {
4762           if (!IsFixed) {
            // The PoP says: "A short floating-point datum requires only the
            // left-most 32 bit positions of a floating-point register".
            // Therefore, in contrast to ArgKind::GeneralPurpose and
            // ArgKind::Memory, don't extend the shadow and don't mind the gap.
4767             ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
4768             if (MS.TrackOrigins)
4769               OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
4770           }
4771           FpOffset += ArgSize;
4772         } else {
4773           FpOffset = kParamTLSSize;
4774         }
4775         break;
4776       }
4777       case ArgKind::Vector: {
        // Keep track of VrIndex. No need to store shadow, since vector varargs
        // go through ArgKind::Memory.
4780         assert(IsFixed);
4781         VrIndex++;
4782         break;
4783       }
4784       case ArgKind::Memory: {
4785         // Keep track of OverflowOffset and store shadow only for varargs.
4786         // Ignore fixed args, since we need to copy only the vararg portion of
4787         // the overflow area shadow.
4788         if (!IsFixed) {
4789           uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
4790           uint64_t ArgSize = alignTo(ArgAllocSize, 8);
4791           if (OverflowOffset + ArgSize <= kParamTLSSize) {
4792             SE = getShadowExtension(CB, ArgNo);
4793             uint64_t GapSize =
4794                 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
4795             ShadowBase =
4796                 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
4797             if (MS.TrackOrigins)
4798               OriginBase =
4799                   getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
4800             OverflowOffset += ArgSize;
4801           } else {
4802             OverflowOffset = kParamTLSSize;
4803           }
4804         }
4805         break;
4806       }
4807       case ArgKind::Indirect:
4808         llvm_unreachable("Indirect must be converted to GeneralPurpose");
4809       }
4810       if (ShadowBase == nullptr)
4811         continue;
4812       Value *Shadow = MSV.getShadow(A);
4813       if (SE != ShadowExtension::None)
4814         Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
4815                                       /*Signed*/ SE == ShadowExtension::Sign);
4816       ShadowBase = IRB.CreateIntToPtr(
4817           ShadowBase, PointerType::get(Shadow->getType(), 0), "_msarg_va_s");
4818       IRB.CreateStore(Shadow, ShadowBase);
4819       if (MS.TrackOrigins) {
4820         Value *Origin = MSV.getOrigin(A);
4821         unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
4822         MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
4823                         kMinOriginAlignment);
4824       }
4825     }
4826     Constant *OverflowSize = ConstantInt::get(
4827         IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
4828     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
4829   }
4830 
4831   Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
4832     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4833     return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4834   }
4835 
4836   Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
4837     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
4838     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4839     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
4840                               "_msarg_va_o");
4841   }
4842 
4843   void unpoisonVAListTagForInst(IntrinsicInst &I) {
4844     IRBuilder<> IRB(&I);
4845     Value *VAListTag = I.getArgOperand(0);
4846     Value *ShadowPtr, *OriginPtr;
4847     const Align Alignment = Align(8);
4848     std::tie(ShadowPtr, OriginPtr) =
4849         MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
4850                                /*isStore*/ true);
4851     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4852                      SystemZVAListTagSize, Alignment, false);
4853   }
4854 
4855   void visitVAStartInst(VAStartInst &I) override {
4856     VAStartInstrumentationList.push_back(&I);
4857     unpoisonVAListTagForInst(I);
4858   }
4859 
4860   void visitVACopyInst(VACopyInst &I) override { unpoisonVAListTagForInst(I); }
4861 
4862   void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
4863     Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4864     Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4865         IRB.CreateAdd(
4866             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4867             ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
4868         PointerType::get(RegSaveAreaPtrTy, 0));
4869     Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4870     Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4871     const Align Alignment = Align(8);
4872     std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4873         MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
4874                                /*isStore*/ true);
4875     // TODO(iii): copy only fragments filled by visitCallBase()
4876     IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4877                      SystemZRegSaveAreaSize);
4878     if (MS.TrackOrigins)
4879       IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4880                        Alignment, SystemZRegSaveAreaSize);
4881   }
4882 
4883   void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
4884     Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4885     Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4886         IRB.CreateAdd(
4887             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4888             ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
4889         PointerType::get(OverflowArgAreaPtrTy, 0));
4890     Value *OverflowArgAreaPtr =
4891         IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4892     Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4893     const Align Alignment = Align(8);
4894     std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4895         MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4896                                Alignment, /*isStore*/ true);
4897     Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4898                                            SystemZOverflowOffset);
4899     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4900                      VAArgOverflowSize);
4901     if (MS.TrackOrigins) {
4902       SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4903                                       SystemZOverflowOffset);
4904       IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4905                        VAArgOverflowSize);
4906     }
4907   }
4908 
4909   void finalizeInstrumentation() override {
4910     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
4911            "finalizeInstrumentation called twice");
4912     if (!VAStartInstrumentationList.empty()) {
4913       // If there is a va_start in this function, make a backup copy of
4914       // va_arg_tls somewhere in the function entry block.
4915       IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4916       VAArgOverflowSize =
4917           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4918       Value *CopySize =
4919           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
4920                         VAArgOverflowSize);
4921       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4922       IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4923       if (MS.TrackOrigins) {
4924         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4925         IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
4926                          Align(8), CopySize);
4927       }
4928     }
4929 
4930     // Instrument va_start.
4931     // Copy va_list shadow from the backup copy of the TLS contents.
4932     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
4933          VaStartNo < VaStartNum; VaStartNo++) {
4934       CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
4935       IRBuilder<> IRB(OrigInst->getNextNode());
4936       Value *VAListTag = OrigInst->getArgOperand(0);
4937       copyRegSaveArea(IRB, VAListTag);
4938       copyOverflowArea(IRB, VAListTag);
4939     }
4940   }
4941 };
4942 
4943 /// A no-op implementation of VarArgHelper.
4944 struct VarArgNoOpHelper : public VarArgHelper {
4945   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
4946                    MemorySanitizerVisitor &MSV) {}
4947 
4948   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
4949 
4950   void visitVAStartInst(VAStartInst &I) override {}
4951 
4952   void visitVACopyInst(VACopyInst &I) override {}
4953 
4954   void finalizeInstrumentation() override {}
4955 };
4956 
4957 } // end anonymous namespace
4958 
4959 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
4960                                         MemorySanitizerVisitor &Visitor) {
  // Full VarArg handling is implemented only for the targets listed below.
  // Elsewhere the no-op helper is used, so false positives are possible for
  // variadic functions.
4963   Triple TargetTriple(Func.getParent()->getTargetTriple());
4964   if (TargetTriple.getArch() == Triple::x86_64)
4965     return new VarArgAMD64Helper(Func, Msan, Visitor);
4966   else if (TargetTriple.isMIPS64())
4967     return new VarArgMIPS64Helper(Func, Msan, Visitor);
4968   else if (TargetTriple.getArch() == Triple::aarch64)
4969     return new VarArgAArch64Helper(Func, Msan, Visitor);
4970   else if (TargetTriple.getArch() == Triple::ppc64 ||
4971            TargetTriple.getArch() == Triple::ppc64le)
4972     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
4973   else if (TargetTriple.getArch() == Triple::systemz)
4974     return new VarArgSystemZHelper(Func, Msan, Visitor);
4975   else
4976     return new VarArgNoOpHelper(Func, Msan, Visitor);
4977 }
4978 
4979 bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
4980   if (!CompileKernel && F.getName() == kMsanModuleCtorName)
4981     return false;
4982 
4983   MemorySanitizerVisitor Visitor(F, *this, TLI);
4984 
4985   // Clear out readonly/readnone attributes.
4986   AttrBuilder B;
4987   B.addAttribute(Attribute::ReadOnly)
4988       .addAttribute(Attribute::ReadNone)
4989       .addAttribute(Attribute::WriteOnly)
4990       .addAttribute(Attribute::ArgMemOnly)
4991       .addAttribute(Attribute::Speculatable);
4992   F.removeAttributes(AttributeList::FunctionIndex, B);
4993 
4994   return Visitor.runOnFunction();
4995 }
4996