1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11 /// analysis.
12 ///
13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14 /// class of bugs on its own.  Instead, it provides a generic dynamic data flow
15 /// analysis framework to be used by clients to help detect application-specific
16 /// issues within their own code.
17 ///
18 /// The analysis is based on automatic propagation of data flow labels (also
19 /// known as taint labels) through a program as it performs computation.  Each
20 /// byte of application memory is backed by two bytes of shadow memory which
21 /// hold the label.  On Linux/x86_64, memory is laid out as follows:
22 ///
23 /// +--------------------+ 0x800000000000 (top of memory)
24 /// | application memory |
25 /// +--------------------+ 0x700000008000 (kAppAddr)
26 /// |                    |
27 /// |       unused       |
28 /// |                    |
29 /// +--------------------+ 0x300200000000 (kUnusedAddr)
30 /// |    union table     |
31 /// +--------------------+ 0x300000000000 (kUnionTableAddr)
32 /// |       origin       |
33 /// +--------------------+ 0x200000008000 (kOriginAddr)
34 /// |   shadow memory    |
35 /// +--------------------+ 0x000000010000 (kShadowAddr)
36 /// | reserved by kernel |
37 /// +--------------------+ 0x000000000000
38 ///
39 /// To derive a shadow memory address from an application memory address,
40 /// bits 44-46 are cleared to bring the address into the range
41 /// [0x000000008000,0x100000000000).  Then the address is shifted left by 1 to
42 /// account for the double byte representation of shadow labels and move the
43 /// address into the shadow memory range.  See the function
44 /// DataFlowSanitizer::getShadowAddress below.
45 ///
46 /// For more information, please refer to the design document:
47 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
48 //
49 //===----------------------------------------------------------------------===//
50 
51 #include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
52 #include "llvm/ADT/DenseMap.h"
53 #include "llvm/ADT/DenseSet.h"
54 #include "llvm/ADT/DepthFirstIterator.h"
55 #include "llvm/ADT/None.h"
56 #include "llvm/ADT/SmallPtrSet.h"
57 #include "llvm/ADT/SmallVector.h"
58 #include "llvm/ADT/StringExtras.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Triple.h"
61 #include "llvm/ADT/iterator.h"
62 #include "llvm/Analysis/ValueTracking.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/Constant.h"
67 #include "llvm/IR/Constants.h"
68 #include "llvm/IR/DataLayout.h"
69 #include "llvm/IR/DerivedTypes.h"
70 #include "llvm/IR/Dominators.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalAlias.h"
73 #include "llvm/IR/GlobalValue.h"
74 #include "llvm/IR/GlobalVariable.h"
75 #include "llvm/IR/IRBuilder.h"
76 #include "llvm/IR/InlineAsm.h"
77 #include "llvm/IR/InstVisitor.h"
78 #include "llvm/IR/InstrTypes.h"
79 #include "llvm/IR/Instruction.h"
80 #include "llvm/IR/Instructions.h"
81 #include "llvm/IR/IntrinsicInst.h"
82 #include "llvm/IR/LLVMContext.h"
83 #include "llvm/IR/MDBuilder.h"
84 #include "llvm/IR/Module.h"
85 #include "llvm/IR/PassManager.h"
86 #include "llvm/IR/Type.h"
87 #include "llvm/IR/User.h"
88 #include "llvm/IR/Value.h"
89 #include "llvm/InitializePasses.h"
90 #include "llvm/Pass.h"
91 #include "llvm/Support/Alignment.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CommandLine.h"
94 #include "llvm/Support/ErrorHandling.h"
95 #include "llvm/Support/SpecialCaseList.h"
96 #include "llvm/Support/VirtualFileSystem.h"
97 #include "llvm/Transforms/Instrumentation.h"
98 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
99 #include "llvm/Transforms/Utils/Local.h"
100 #include <algorithm>
101 #include <cassert>
102 #include <cstddef>
103 #include <cstdint>
104 #include <iterator>
105 #include <memory>
106 #include <set>
107 #include <string>
108 #include <utility>
109 #include <vector>
110 
using namespace llvm;

// Alignment of shadow values stored in TLS.
// This must be consistent with ShadowWidthBits.
static const Align ShadowTLSAlignment = Align(2);

// Minimum alignment of origin values; origins are 4 bytes wide (see
// OriginWidthBytes in DataFlowSanitizer below).
static const Align MinOriginAlignment = Align(4);

// The size of TLS variables. These constants must be kept in sync with the ones
// in dfsan.cpp.
static const unsigned ArgTLSSize = 800;
static const unsigned RetvalTLSSize = 800;

// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer
// the runtime will set the external mask based on the VMA range.
const char DFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";
127 
// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct.  For example,
// if the input IR contains a load with alignment 8, this flag will cause
// the shadow load to have alignment 16.  This flag is disabled by default as
// we have unfortunately encountered too much code (including Clang itself;
// see PR14291) which performs misaligned access.
static cl::opt<bool> ClPreserveAlignment(
    "dfsan-preserve-alignment",
    cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
    cl::init(false));

// The ABI list files control how shadow parameters are passed. The pass treats
// every function labelled "uninstrumented" in the ABI list file as conforming
// to the "native" (i.e. unsanitized) ABI.  Unless the ABI list contains
// additional annotations for those functions, a call to one of those functions
// will produce a warning message, as the labelling behaviour of the function is
// unknown.  The other supported annotations are "functional" and "discard",
// which are described below under DataFlowSanitizer::WrapperKind.
static cl::list<std::string> ClABIListFiles(
    "dfsan-abilist",
    cl::desc("File listing native ABI functions and how the pass treats them"),
    cl::Hidden);

// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool>
    ClArgsABI("dfsan-args-abi",
              cl::desc("Use the argument ABI rather than the TLS ABI"),
              cl::Hidden);

// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
static cl::opt<bool> ClCombinePointerLabelsOnLoad(
    "dfsan-combine-pointer-labels-on-load",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "loading from memory."),
    cl::Hidden, cl::init(true));

// Controls whether the pass includes or ignores the labels of pointers in
// stores instructions.
static cl::opt<bool> ClCombinePointerLabelsOnStore(
    "dfsan-combine-pointer-labels-on-store",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "storing in memory."),
    cl::Hidden, cl::init(false));

// Controls whether the pass inserts a call to __dfsan_nonzero_label whenever
// a parameter, load or return is observed to carry a nonzero label.  Intended
// as a debugging aid.
static cl::opt<bool> ClDebugNonzeroLabels(
    "dfsan-debug-nonzero-labels",
    cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
             "load or return with a nonzero label"),
    cl::Hidden);

// Experimental feature that inserts callbacks for certain data events.
// Currently callbacks are only inserted for loads, stores, memory transfers
// (i.e. memcpy and memmove), and comparisons.
//
// If this flag is set to true, the user must provide definitions for the
// following callback functions:
//   void __dfsan_load_callback(dfsan_label Label, void* addr);
//   void __dfsan_store_callback(dfsan_label Label, void* addr);
//   void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
//   void __dfsan_cmp_callback(dfsan_label CombinedLabel);
static cl::opt<bool> ClEventCallbacks(
    "dfsan-event-callbacks",
    cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
    cl::Hidden, cl::init(false));

// Use a distinct bit for each base label, enabling faster unions with less
// instrumentation.  Limits the max number of base labels to 16.
static cl::opt<bool> ClFast16Labels(
    "dfsan-fast-16-labels",
    cl::desc("Use more efficient instrumentation, limiting the number of "
             "labels to 16."),
    cl::Hidden, cl::init(false));

// Controls whether the pass tracks the control flow of select instructions.
static cl::opt<bool> ClTrackSelectControlFlow(
    "dfsan-track-select-control-flow",
    cl::desc("Propagate labels from condition values of select instructions "
             "to results."),
    cl::Hidden, cl::init(true));

// Controls how to track origins.
// * 0: do not track origins.
// * 1: track origins at memory store operations.
// * 2: TODO: track origins at memory store operations and callsites.
static cl::opt<int> ClTrackOrigins("dfsan-track-origins",
                                   cl::desc("Track origins of labels"),
                                   cl::Hidden, cl::init(0));
217 
218 static StringRef getGlobalTypeString(const GlobalValue &G) {
219   // Types of GlobalVariables are always pointer types.
220   Type *GType = G.getValueType();
221   // For now we support excluding struct types only.
222   if (StructType *SGType = dyn_cast<StructType>(GType)) {
223     if (!SGType->isLiteral())
224       return SGType->getName();
225   }
226   return "<unknown type>";
227 }
228 
229 namespace {
230 
231 class DFSanABIList {
232   std::unique_ptr<SpecialCaseList> SCL;
233 
234 public:
235   DFSanABIList() = default;
236 
237   void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
238 
239   /// Returns whether either this function or its source file are listed in the
240   /// given category.
241   bool isIn(const Function &F, StringRef Category) const {
242     return isIn(*F.getParent(), Category) ||
243            SCL->inSection("dataflow", "fun", F.getName(), Category);
244   }
245 
246   /// Returns whether this global alias is listed in the given category.
247   ///
248   /// If GA aliases a function, the alias's name is matched as a function name
249   /// would be.  Similarly, aliases of globals are matched like globals.
250   bool isIn(const GlobalAlias &GA, StringRef Category) const {
251     if (isIn(*GA.getParent(), Category))
252       return true;
253 
254     if (isa<FunctionType>(GA.getValueType()))
255       return SCL->inSection("dataflow", "fun", GA.getName(), Category);
256 
257     return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
258            SCL->inSection("dataflow", "type", getGlobalTypeString(GA),
259                           Category);
260   }
261 
262   /// Returns whether this module is listed in the given category.
263   bool isIn(const Module &M, StringRef Category) const {
264     return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
265   }
266 };
267 
268 /// TransformedFunction is used to express the result of transforming one
269 /// function type into another.  This struct is immutable.  It holds metadata
270 /// useful for updating calls of the old function to the new type.
271 struct TransformedFunction {
272   TransformedFunction(FunctionType *OriginalType, FunctionType *TransformedType,
273                       std::vector<unsigned> ArgumentIndexMapping)
274       : OriginalType(OriginalType), TransformedType(TransformedType),
275         ArgumentIndexMapping(ArgumentIndexMapping) {}
276 
277   // Disallow copies.
278   TransformedFunction(const TransformedFunction &) = delete;
279   TransformedFunction &operator=(const TransformedFunction &) = delete;
280 
281   // Allow moves.
282   TransformedFunction(TransformedFunction &&) = default;
283   TransformedFunction &operator=(TransformedFunction &&) = default;
284 
285   /// Type of the function before the transformation.
286   FunctionType *OriginalType;
287 
288   /// Type of the function after the transformation.
289   FunctionType *TransformedType;
290 
291   /// Transforming a function may change the position of arguments.  This
292   /// member records the mapping from each argument's old position to its new
293   /// position.  Argument positions are zero-indexed.  If the transformation
294   /// from F to F' made the first argument of F into the third argument of F',
295   /// then ArgumentIndexMapping[0] will equal 2.
296   std::vector<unsigned> ArgumentIndexMapping;
297 };
298 
/// Given function attributes from a call site for the original function,
/// return function attributes appropriate for a call to the transformed
/// function.
AttributeList
transformFunctionAttributes(const TransformedFunction &TransformedFunction,
                            LLVMContext &Ctx, AttributeList CallSiteAttrs) {

  // Construct a vector of AttributeSet for each function argument.  Default
  // (empty) sets are produced for the extra arguments introduced by the
  // transformation.
  std::vector<llvm::AttributeSet> ArgumentAttributes(
      TransformedFunction.TransformedType->getNumParams());

  // Copy attributes from the parameter of the original function to the
  // transformed version.  'ArgumentIndexMapping' holds the mapping from
  // old argument position to new.
  for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();
       I < IE; ++I) {
    unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];
    ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(I);
  }

  // Copy annotations on varargs arguments.
  // NOTE(review): the bound uses getNumAttrSets(), which counts the call
  // site's attribute sets rather than its arguments -- presumably intended as
  // an upper bound for positions that may carry vararg annotations; confirm
  // against AttributeList's indexing scheme.
  for (unsigned I = TransformedFunction.OriginalType->getNumParams(),
                IE = CallSiteAttrs.getNumAttrSets();
       I < IE; ++I) {
    ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(I));
  }

  // Function-level and return-value attributes pass through unchanged; only
  // the per-argument sets are remapped above.
  return AttributeList::get(Ctx, CallSiteAttrs.getFnAttributes(),
                            CallSiteAttrs.getRetAttributes(),
                            llvm::makeArrayRef(ArgumentAttributes));
}
330 
/// Module-level state and entry point of the pass.  Holds cached IR types,
/// constants and runtime-function declarations shared by the per-function
/// instrumentation (DFSanFunction / DFSanVisitor).
class DataFlowSanitizer {
  friend struct DFSanFunction;
  friend class DFSanVisitor;

  // Widths of a shadow label and of an origin ID.  ShadowWidthBits must be
  // kept consistent with ShadowTLSAlignment above.
  enum {
    ShadowWidthBits = 16,
    ShadowWidthBytes = ShadowWidthBits / 8,
    OriginWidthBits = 32,
    OriginWidthBytes = OriginWidthBits / 8
  };

  /// Which ABI should be used for instrumented functions?
  enum InstrumentedABI {
    /// Argument and return value labels are passed through additional
    /// arguments and by modifying the return type.
    IA_Args,

    /// Argument and return value labels are passed through TLS variables
    /// __dfsan_arg_tls and __dfsan_retval_tls.
    IA_TLS
  };

  /// How should calls to uninstrumented functions be handled?
  enum WrapperKind {
    /// This function is present in an uninstrumented form but we don't know
    /// how it should be handled.  Print a warning and call the function anyway.
    /// Don't label the return value.
    WK_Warning,

    /// This function does not write to (user-accessible) memory, and its return
    /// value is unlabelled.
    WK_Discard,

    /// This function does not write to (user-accessible) memory, and the label
    /// of its return value is the union of the label of its arguments.
    WK_Functional,

    /// Instead of calling the function, a custom wrapper __dfsw_F is called,
    /// where F is the name of the function.  This function may wrap the
    /// original function or provide its own implementation.  This is similar to
    /// the IA_Args ABI, except that IA_Args uses a struct return type to
    /// pass the return value shadow in a register, while WK_Custom uses an
    /// extra pointer argument to return the shadow.  This allows the wrapped
    /// form of the function type to be expressed in C.
    WK_Custom
  };

  // Module/context being instrumented and cached IR types and constants.
  // NOTE(review): presumably populated by init() -- its body is not visible
  // in this chunk.
  Module *Mod;
  LLVMContext *Ctx;
  Type *Int8Ptr;
  IntegerType *OriginTy;
  PointerType *OriginPtrTy;
  ConstantInt *OriginBase;
  ConstantInt *ZeroOrigin;
  /// The shadow type for all primitive types and vector types.
  IntegerType *PrimitiveShadowTy;
  PointerType *PrimitiveShadowPtrTy;
  IntegerType *IntptrTy;
  ConstantInt *ZeroPrimitiveShadow;
  ConstantInt *ShadowPtrMask;
  ConstantInt *ShadowPtrMul;
  // TLS globals used by the IA_TLS ABI to pass argument/return labels and
  // origins between instrumented functions.
  Constant *ArgTLS;
  ArrayType *ArgOriginTLSTy;
  Constant *ArgOriginTLS;
  Constant *RetvalTLS;
  Constant *RetvalOriginTLS;
  // Runtime-provided shadow mask (see DFSanExternShadowPtrMask above).
  Constant *ExternalShadowMask;
  // Types of the __dfsan_* runtime functions, followed by their callees.
  // NOTE(review): presumably registered by initializeRuntimeFunctions /
  // initializeCallbackFunctions below -- bodies not visible in this chunk.
  FunctionType *DFSanUnionFnTy;
  FunctionType *DFSanUnionLoadFnTy;
  FunctionType *DFSanLoadLabelAndOriginFnTy;
  FunctionType *DFSanUnimplementedFnTy;
  FunctionType *DFSanSetLabelFnTy;
  FunctionType *DFSanNonzeroLabelFnTy;
  FunctionType *DFSanVarargWrapperFnTy;
  FunctionType *DFSanCmpCallbackFnTy;
  FunctionType *DFSanLoadStoreCallbackFnTy;
  FunctionType *DFSanMemTransferCallbackFnTy;
  FunctionType *DFSanChainOriginFnTy;
  FunctionType *DFSanMemOriginTransferFnTy;
  FunctionType *DFSanMaybeStoreOriginFnTy;
  FunctionCallee DFSanUnionFn;
  FunctionCallee DFSanCheckedUnionFn;
  FunctionCallee DFSanUnionLoadFn;
  FunctionCallee DFSanUnionLoadFast16LabelsFn;
  FunctionCallee DFSanLoadLabelAndOriginFn;
  FunctionCallee DFSanUnimplementedFn;
  FunctionCallee DFSanSetLabelFn;
  FunctionCallee DFSanNonzeroLabelFn;
  FunctionCallee DFSanVarargWrapperFn;
  FunctionCallee DFSanLoadCallbackFn;
  FunctionCallee DFSanStoreCallbackFn;
  FunctionCallee DFSanMemTransferCallbackFn;
  FunctionCallee DFSanCmpCallbackFn;
  FunctionCallee DFSanChainOriginFn;
  FunctionCallee DFSanMemOriginTransferFn;
  FunctionCallee DFSanMaybeStoreOriginFn;
  // Set of the runtime functions above, used to recognize them in the module.
  SmallPtrSet<Value *, 16> DFSanRuntimeFunctions;
  // Branch weights marking runtime slow paths as cold.
  MDNode *ColdCallWeights;
  DFSanABIList ABIList;
  // Maps wrapper functions back to the functions they wrap.
  DenseMap<Value *, Function *> UnwrappedFnMap;
  AttrBuilder ReadOnlyNoneAttrs;
  // Whether the shadow mask comes from the runtime (multiple-VMA targets)
  // instead of a compile-time constant.
  bool DFSanRuntimeShadowMask = false;

  Value *getShadowOffset(Value *Addr, IRBuilder<> &IRB);
  Value *getShadowAddress(Value *Addr, Instruction *Pos);
  std::pair<Value *, Value *>
  getShadowOriginAddress(Value *Addr, Align InstAlignment, Instruction *Pos);
  bool isInstrumented(const Function *F);
  bool isInstrumented(const GlobalAlias *GA);
  FunctionType *getArgsFunctionType(FunctionType *T);
  FunctionType *getTrampolineFunctionType(FunctionType *T);
  TransformedFunction getCustomFunctionType(FunctionType *T);
  InstrumentedABI getInstrumentedABI();
  WrapperKind getWrapperKind(Function *F);
  void addGlobalNamePrefix(GlobalValue *GV);
  Function *buildWrapperFunction(Function *F, StringRef NewFName,
                                 GlobalValue::LinkageTypes NewFLink,
                                 FunctionType *NewFT);
  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
  void initializeCallbackFunctions(Module &M);
  void initializeRuntimeFunctions(Module &M);
  void injectMetadataGlobals(Module &M);

  bool init(Module &M);

  /// Returns whether the pass tracks origins. Support only fast16 mode in TLS
  /// ABI mode.
  bool shouldTrackOrigins();

  /// Returns whether the pass tracks labels for struct fields and array
  /// indices. Support only fast16 mode in TLS ABI mode.
  bool shouldTrackFieldsAndIndices();

  /// Returns a zero constant with the shadow type of OrigTy.
  ///
  /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2,...}
  /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]
  /// getZeroShadow(other type) = i16(0)
  ///
  /// Note that a zero shadow is always i16(0) when shouldTrackFieldsAndIndices
  /// returns false.
  Constant *getZeroShadow(Type *OrigTy);
  /// Returns a zero constant with the shadow type of V's type.
  Constant *getZeroShadow(Value *V);

  /// Checks if V is a zero shadow.
  bool isZeroShadow(Value *V);

  /// Returns the shadow type of OrigTy.
  ///
  /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}
  /// getShadowTy([n x T]) = [n x getShadowTy(T)]
  /// getShadowTy(other type) = i16
  ///
  /// Note that a shadow type is always i16 when shouldTrackFieldsAndIndices
  /// returns false.
  Type *getShadowTy(Type *OrigTy);
  /// Returns the shadow type of of V's type.
  Type *getShadowTy(Value *V);

  // Number of 4-byte origin slots that fit in the argument-TLS buffer.
  const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;

public:
  DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);

  bool runImpl(Module &M);
};
498 
/// Per-function instrumentation state: shadow/origin caches, TLS access
/// helpers, and IR generation for label propagation within one function.
struct DFSanFunction {
  // Module-level pass state shared by all functions.
  DataFlowSanitizer &DFS;
  // The function being instrumented.
  Function *F;
  // Dominator tree of F, recalculated in the constructor.
  DominatorTree DT;
  // ABI selected by DFS.getInstrumentedABI().
  DataFlowSanitizer::InstrumentedABI IA;
  // Whether this function conforms to the native (uninstrumented) ABI;
  // supplied by the caller constructing this object.
  bool IsNativeABI;
  // Stack slots for the return label/origin, created lazily (nullptr until
  // first needed).
  AllocaInst *LabelReturnAlloca = nullptr;
  AllocaInst *OriginReturnAlloca = nullptr;
  // Per-value shadow and origin caches.
  DenseMap<Value *, Value *> ValShadowMap;
  DenseMap<Value *, Value *> ValOriginMap;
  // Maps an alloca to the alloca holding its shadow / origin.
  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
  DenseMap<AllocaInst *, AllocaInst *> AllocaOriginMap;

  // Shadow PHIs whose incoming values must be filled in after all
  // instructions have been visited (value PHI, shadow PHI pairs).
  std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
  // Instructions the visitor should not instrument.
  DenseSet<Instruction *> SkipInsts;
  // Values to be checked by the -dfsan-debug-nonzero-labels post-pass.
  std::vector<Value *> NonZeroChecks;
  // When true, avoid creating new basic blocks (see FIXME in constructor).
  bool AvoidNewBlocks;

  struct CachedShadow {
    BasicBlock *Block; // The block where Shadow is defined.
    Value *Shadow;
  };
  /// Maps a value to its latest shadow value in terms of domination tree.
  DenseMap<std::pair<Value *, Value *>, CachedShadow> CachedShadows;
  /// Maps a value to its latest collapsed shadow value it was converted to in
  /// terms of domination tree. When ClDebugNonzeroLabels is on, this cache is
  /// used at a post process where CFG blocks are split. So it does not cache
  /// BasicBlock like CachedShadows, but uses domination between values.
  DenseMap<Value *, Value *> CachedCollapsedShadows;
  // For each value, the set of shadow values already known to be included in
  // its shadow (NOTE(review): inferred from the name; usage not visible in
  // this chunk -- confirm against combineShadows' implementation).
  DenseMap<Value *, std::set<Value *>> ShadowElements;

  DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
    DT.recalculate(*F);
    // FIXME: Need to track down the register allocator issue which causes poor
    // performance in pathological cases with large numbers of basic blocks.
    AvoidNewBlocks = F->size() > 1000;
  }

  /// Computes the shadow address for a given function argument.
  ///
  /// Shadow = ArgTLS+ArgOffset.
  Value *getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB);

  /// Computes the shadow address for a return value.
  Value *getRetvalTLS(Type *T, IRBuilder<> &IRB);

  /// Computes the origin address for a given function argument.
  ///
  /// Origin = ArgOriginTLS[ArgNo].
  Value *getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB);

  /// Computes the origin address for a return value.
  Value *getRetvalOriginTLS();

  Value *getOrigin(Value *V);
  void setOrigin(Instruction *I, Value *Origin);
  /// Generates IR to compute the origin of the last operand with a taint label.
  Value *combineOperandOrigins(Instruction *Inst);
  /// Before the instruction Pos, generates IR to compute the last origin with a
  /// taint label. Labels and origins are from vectors Shadows and Origins
  /// correspondingly. The generated IR is like
  ///   Sn-1 != Zero ? On-1: ... S2 != Zero ? O2: S1 != Zero ? O1: O0
  /// When Zero is nullptr, it uses ZeroPrimitiveShadow. Otherwise it can be
  /// zeros with other bitwidths.
  Value *combineOrigins(const std::vector<Value *> &Shadows,
                        const std::vector<Value *> &Origins, Instruction *Pos,
                        ConstantInt *Zero = nullptr);

  Value *getShadow(Value *V);
  void setShadow(Instruction *I, Value *Shadow);
  /// Generates IR to compute the union of the two given shadows, inserting it
  /// before Pos. The combined value is with primitive type.
  Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  /// Combines the shadow values of V1 and V2, then converts the combined value
  /// with primitive type into a shadow value with the original type T.
  Value *combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
                                   Instruction *Pos);
  Value *combineOperandShadows(Instruction *Inst);
  std::pair<Value *, Value *> loadShadowOrigin(Value *ShadowAddr, uint64_t Size,
                                               Align InstAlignment,
                                               Instruction *Pos);
  void storePrimitiveShadow(Value *Addr, uint64_t Size, Align Alignment,
                            Value *PrimitiveShadow, Instruction *Pos);
  /// Applies PrimitiveShadow to all primitive subtypes of T, returning
  /// the expanded shadow value.
  ///
  /// EFP({T1,T2, ...}, PS) = {EFP(T1,PS),EFP(T2,PS),...}
  /// EFP([n x T], PS) = [n x EFP(T,PS)]
  /// EFP(other types, PS) = PS
  Value *expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
                                   Instruction *Pos);
  /// Collapses Shadow into a single primitive shadow value, unioning all
  /// primitive shadow values in the process. Returns the final primitive
  /// shadow value.
  ///
  /// CTP({V1,V2, ...}) = UNION(CFP(V1,PS),CFP(V2,PS),...)
  /// CTP([V1,V2,...]) = UNION(CFP(V1,PS),CFP(V2,PS),...)
  /// CTP(other types, PS) = PS
  Value *collapseToPrimitiveShadow(Value *Shadow, Instruction *Pos);

  void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
                                Instruction *Pos);

  Align getShadowAlign(Align InstAlignment);

private:
  /// Collapses the shadow with aggregate type into a single primitive shadow
  /// value.
  template <class AggregateType>
  Value *collapseAggregateShadow(AggregateType *AT, Value *Shadow,
                                 IRBuilder<> &IRB);

  Value *collapseToPrimitiveShadow(Value *Shadow, IRBuilder<> &IRB);

  /// Returns the shadow value of an argument A.
  Value *getShadowForTLSArgument(Argument *A);

  /// The fast path of loading shadow in legacy mode.
  Value *loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
                              Align ShadowAlign, Instruction *Pos);

  /// The fast path of loading shadow in fast-16-label mode.
  std::pair<Value *, Value *>
  loadFast16ShadowFast(Value *ShadowAddr, Value *OriginAddr, uint64_t Size,
                       Align ShadowAlign, Align OriginAlign, Value *FirstOrigin,
                       Instruction *Pos);

  Align getOriginAlign(Align InstAlignment);

  /// Because 4 contiguous bytes share one 4-byte origin, the most accurate load
  /// is __dfsan_load_label_and_origin. This function returns the union of all
  /// labels and the origin of the first taint label. However this is an
  /// additional call with many instructions. To ensure common cases are fast,
  /// checks if it is possible to load labels and origins without using the
  /// callback function.
  bool useCallbackLoadLabelAndOrigin(uint64_t Size, Align InstAlignment);
};
637 
/// Instruction visitor that emits the actual label/origin propagation IR for
/// each instruction of the function held by DFSF.
class DFSanVisitor : public InstVisitor<DFSanVisitor> {
public:
  // Per-function state (shadow caches, TLS helpers) shared with the pass.
  DFSanFunction &DFSF;

  DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}

  const DataLayout &getDataLayout() const {
    return DFSF.F->getParent()->getDataLayout();
  }

  // Combines shadow values and origins for all of I's operands.
  void visitInstOperands(Instruction &I);

  // One visit* overload per instruction kind that needs custom propagation.
  void visitUnaryOperator(UnaryOperator &UO);
  void visitBinaryOperator(BinaryOperator &BO);
  void visitCastInst(CastInst &CI);
  void visitCmpInst(CmpInst &CI);
  void visitGetElementPtrInst(GetElementPtrInst &GEPI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitReturnInst(ReturnInst &RI);
  void visitCallBase(CallBase &CB);
  void visitPHINode(PHINode &PN);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitShuffleVectorInst(ShuffleVectorInst &I);
  void visitExtractValueInst(ExtractValueInst &I);
  void visitInsertValueInst(InsertValueInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitSelectInst(SelectInst &I);
  void visitMemSetInst(MemSetInst &I);
  void visitMemTransferInst(MemTransferInst &I);

private:
  // Shared handler for cmpxchg and atomicrmw instrumentation.
  void visitCASOrRMW(Align InstAlignment, Instruction &I);

  // Returns false when this is an invoke of a custom function.
  bool visitWrappedCallBase(Function &F, CallBase &CB);

  // Combines origins for all of I's operands.
  void visitInstOperandOrigins(Instruction &I);

  void addShadowArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
                          IRBuilder<> &IRB);

  void addOriginArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
                          IRBuilder<> &IRB);
};
688 
689 } // end anonymous namespace
690 
691 DataFlowSanitizer::DataFlowSanitizer(
692     const std::vector<std::string> &ABIListFiles) {
693   std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
694   llvm::append_range(AllABIListFiles, ClABIListFiles);
695   // FIXME: should we propagate vfs::FileSystem to this constructor?
696   ABIList.set(
697       SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem()));
698 }
699 
700 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
701   SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
702   ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
703   if (T->isVarArg())
704     ArgTypes.push_back(PrimitiveShadowPtrTy);
705   Type *RetType = T->getReturnType();
706   if (!RetType->isVoidTy())
707     RetType = StructType::get(RetType, PrimitiveShadowTy);
708   return FunctionType::get(RetType, ArgTypes, T->isVarArg());
709 }
710 
711 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
712   assert(!T->isVarArg());
713   SmallVector<Type *, 4> ArgTypes;
714   ArgTypes.push_back(T->getPointerTo());
715   ArgTypes.append(T->param_begin(), T->param_end());
716   ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
717   Type *RetType = T->getReturnType();
718   if (!RetType->isVoidTy())
719     ArgTypes.push_back(PrimitiveShadowPtrTy);
720 
721   if (shouldTrackOrigins()) {
722     ArgTypes.append(T->getNumParams(), OriginTy);
723     if (!RetType->isVoidTy())
724       ArgTypes.push_back(OriginPtrTy);
725   }
726 
727   return FunctionType::get(T->getReturnType(), ArgTypes, false);
728 }
729 
// Builds the signature of the custom replacement for a function of type T,
// together with the mapping from T's parameter indices to the replacement's
// parameter indices.  Argument layout (in order): rewritten value
// parameters, per-parameter primitive shadows, a vararg shadow pointer, a
// return-shadow out-pointer, then (with origin tracking) per-parameter
// origins, a vararg origin pointer, and a return-origin out-pointer.
TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
  SmallVector<Type *, 4> ArgTypes;

  // Some parameters of the custom function being constructed are
  // parameters of T.  Record the mapping from parameters of T to
  // parameters of the custom function, so that parameter attributes
  // at call sites can be updated.
  std::vector<unsigned> ArgumentIndexMapping;
  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
    Type *ParamType = T->getParamType(I);
    FunctionType *FT;
    if (isa<PointerType>(ParamType) &&
        (FT = dyn_cast<FunctionType>(ParamType->getPointerElementType()))) {
      // A function-pointer parameter expands to two parameters: a pointer
      // to a trampoline with the shadow-extended signature, and the original
      // callback as an opaque i8*.  The mapping records the index of the
      // first of the pair.
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
      ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
    } else {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(ParamType);
    }
  }
  // One primitive shadow per original parameter, then the vararg shadow
  // pointer and the return-shadow out-pointer.
  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
    ArgTypes.push_back(PrimitiveShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(PrimitiveShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(PrimitiveShadowPtrTy);

  // With origin tracking, origins follow the same pattern as shadows.
  if (shouldTrackOrigins()) {
    for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
      ArgTypes.push_back(OriginTy);
    if (T->isVarArg())
      ArgTypes.push_back(OriginPtrTy);
    if (!RetType->isVoidTy())
      ArgTypes.push_back(OriginPtrTy);
  }

  return TransformedFunction(
      T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
      ArgumentIndexMapping);
}
772 
773 bool DataFlowSanitizer::isZeroShadow(Value *V) {
774   if (!shouldTrackFieldsAndIndices())
775     return ZeroPrimitiveShadow == V;
776 
777   Type *T = V->getType();
778   if (!isa<ArrayType>(T) && !isa<StructType>(T)) {
779     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
780       return CI->isZero();
781     return false;
782   }
783 
784   return isa<ConstantAggregateZero>(V);
785 }
786 
787 bool DataFlowSanitizer::shouldTrackOrigins() {
788   static const bool ShouldTrackOrigins =
789       ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
790       ClFast16Labels;
791   return ShouldTrackOrigins;
792 }
793 
794 bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
795   return getInstrumentedABI() == DataFlowSanitizer::IA_TLS && ClFast16Labels;
796 }
797 
798 Constant *DataFlowSanitizer::getZeroShadow(Type *OrigTy) {
799   if (!shouldTrackFieldsAndIndices())
800     return ZeroPrimitiveShadow;
801 
802   if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))
803     return ZeroPrimitiveShadow;
804   Type *ShadowTy = getShadowTy(OrigTy);
805   return ConstantAggregateZero::get(ShadowTy);
806 }
807 
808 Constant *DataFlowSanitizer::getZeroShadow(Value *V) {
809   return getZeroShadow(V->getType());
810 }
811 
812 static Value *expandFromPrimitiveShadowRecursive(
813     Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubShadowTy,
814     Value *PrimitiveShadow, IRBuilder<> &IRB) {
815   if (!isa<ArrayType>(SubShadowTy) && !isa<StructType>(SubShadowTy))
816     return IRB.CreateInsertValue(Shadow, PrimitiveShadow, Indices);
817 
818   if (ArrayType *AT = dyn_cast<ArrayType>(SubShadowTy)) {
819     for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {
820       Indices.push_back(Idx);
821       Shadow = expandFromPrimitiveShadowRecursive(
822           Shadow, Indices, AT->getElementType(), PrimitiveShadow, IRB);
823       Indices.pop_back();
824     }
825     return Shadow;
826   }
827 
828   if (StructType *ST = dyn_cast<StructType>(SubShadowTy)) {
829     for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {
830       Indices.push_back(Idx);
831       Shadow = expandFromPrimitiveShadowRecursive(
832           Shadow, Indices, ST->getElementType(Idx), PrimitiveShadow, IRB);
833       Indices.pop_back();
834     }
835     return Shadow;
836   }
837   llvm_unreachable("Unexpected shadow type");
838 }
839 
840 Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
841                                                 Instruction *Pos) {
842   Type *ShadowTy = DFS.getShadowTy(T);
843 
844   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
845     return PrimitiveShadow;
846 
847   if (DFS.isZeroShadow(PrimitiveShadow))
848     return DFS.getZeroShadow(ShadowTy);
849 
850   IRBuilder<> IRB(Pos);
851   SmallVector<unsigned, 4> Indices;
852   Value *Shadow = UndefValue::get(ShadowTy);
853   Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
854                                               PrimitiveShadow, IRB);
855 
856   // Caches the primitive shadow value that built the shadow value.
857   CachedCollapsedShadows[Shadow] = PrimitiveShadow;
858   return Shadow;
859 }
860 
861 template <class AggregateType>
862 Value *DFSanFunction::collapseAggregateShadow(AggregateType *AT, Value *Shadow,
863                                               IRBuilder<> &IRB) {
864   if (!AT->getNumElements())
865     return DFS.ZeroPrimitiveShadow;
866 
867   Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
868   Value *Aggregator = collapseToPrimitiveShadow(FirstItem, IRB);
869 
870   for (unsigned Idx = 1; Idx < AT->getNumElements(); Idx++) {
871     Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
872     Value *ShadowInner = collapseToPrimitiveShadow(ShadowItem, IRB);
873     Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
874   }
875   return Aggregator;
876 }
877 
878 Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
879                                                 IRBuilder<> &IRB) {
880   Type *ShadowTy = Shadow->getType();
881   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
882     return Shadow;
883   if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy))
884     return collapseAggregateShadow<>(AT, Shadow, IRB);
885   if (StructType *ST = dyn_cast<StructType>(ShadowTy))
886     return collapseAggregateShadow<>(ST, Shadow, IRB);
887   llvm_unreachable("Unexpected shadow type");
888 }
889 
890 Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
891                                                 Instruction *Pos) {
892   Type *ShadowTy = Shadow->getType();
893   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
894     return Shadow;
895 
896   assert(DFS.shouldTrackFieldsAndIndices());
897 
898   // Checks if the cached collapsed shadow value dominates Pos.
899   Value *&CS = CachedCollapsedShadows[Shadow];
900   if (CS && DT.dominates(CS, Pos))
901     return CS;
902 
903   IRBuilder<> IRB(Pos);
904   Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
905   // Caches the converted primitive shadow value.
906   CS = PrimitiveShadow;
907   return PrimitiveShadow;
908 }
909 
910 Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
911   if (!shouldTrackFieldsAndIndices())
912     return PrimitiveShadowTy;
913 
914   if (!OrigTy->isSized())
915     return PrimitiveShadowTy;
916   if (isa<IntegerType>(OrigTy))
917     return PrimitiveShadowTy;
918   if (isa<VectorType>(OrigTy))
919     return PrimitiveShadowTy;
920   if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy))
921     return ArrayType::get(getShadowTy(AT->getElementType()),
922                           AT->getNumElements());
923   if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
924     SmallVector<Type *, 4> Elements;
925     for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)
926       Elements.push_back(getShadowTy(ST->getElementType(I)));
927     return StructType::get(*Ctx, Elements);
928   }
929   return PrimitiveShadowTy;
930 }
931 
932 Type *DataFlowSanitizer::getShadowTy(Value *V) {
933   return getShadowTy(V->getType());
934 }
935 
// Caches per-module state: commonly used types and constants, the
// target-specific shadow mask, and the signatures of the runtime support
// functions declared later in initializeRuntimeFunctions /
// initializeCallbackFunctions.  Always returns true.
bool DataFlowSanitizer::init(Module &M) {
  Triple TargetTriple(M.getTargetTriple());
  const DataLayout &DL = M.getDataLayout();

  Mod = &M;
  Ctx = &M.getContext();
  // Frequently used types and constant values.
  Int8Ptr = Type::getInt8PtrTy(*Ctx);
  OriginTy = IntegerType::get(*Ctx, OriginWidthBits);
  OriginPtrTy = PointerType::getUnqual(OriginTy);
  PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
  PrimitiveShadowPtrTy = PointerType::getUnqual(PrimitiveShadowTy);
  IntptrTy = DL.getIntPtrType(*Ctx);
  ZeroPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, 0);
  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
  // Base of the origin region; see the memory-layout diagram at the top of
  // this file (kOriginAddr).
  OriginBase = ConstantInt::get(IntptrTy, 0x200000000000LL);
  ZeroOrigin = ConstantInt::getSigned(OriginTy, 0);

  // The mask that converts an application address to a shadow address is
  // target-specific.
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
    break;
  case Triple::mips64:
  case Triple::mips64el:
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
    DFSanRuntimeShadowMask = true;
    break;
  default:
    report_fatal_error("unsupported triple");
  }

  // Signatures of the runtime support functions.  These must stay in sync
  // with the declarations in the DFSan runtime library.
  Type *DFSanUnionArgs[2] = {PrimitiveShadowTy, PrimitiveShadowTy};
  DFSanUnionFnTy =
      FunctionType::get(PrimitiveShadowTy, DFSanUnionArgs, /*isVarArg=*/false);
  Type *DFSanUnionLoadArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
  DFSanUnionLoadFnTy = FunctionType::get(PrimitiveShadowTy, DFSanUnionLoadArgs,
                                         /*isVarArg=*/false);
  Type *DFSanLoadLabelAndOriginArgs[2] = {Int8Ptr, IntptrTy};
  // Returns a packed 64-bit value (label and origin combined).
  DFSanLoadLabelAndOriginFnTy =
      FunctionType::get(IntegerType::get(*Ctx, 64), DFSanLoadLabelAndOriginArgs,
                        /*isVarArg=*/false);
  DFSanUnimplementedFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  Type *DFSanSetLabelArgs[4] = {PrimitiveShadowTy, OriginTy,
                                Type::getInt8PtrTy(*Ctx), IntptrTy};
  DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
                                        DFSanSetLabelArgs, /*isVarArg=*/false);
  DFSanNonzeroLabelFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
  DFSanVarargWrapperFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  DFSanCmpCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
                        /*isVarArg=*/false);
  DFSanChainOriginFnTy =
      FunctionType::get(OriginTy, OriginTy, /*isVarArg=*/false);
  Type *DFSanMaybeStoreOriginArgs[4] = {IntegerType::get(*Ctx, ShadowWidthBits),
                                        Int8Ptr, IntptrTy, OriginTy};
  DFSanMaybeStoreOriginFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMaybeStoreOriginArgs, /*isVarArg=*/false);
  Type *DFSanMemOriginTransferArgs[3] = {Int8Ptr, Int8Ptr, IntptrTy};
  DFSanMemOriginTransferFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMemOriginTransferArgs, /*isVarArg=*/false);
  Type *DFSanLoadStoreCallbackArgs[2] = {PrimitiveShadowTy, Int8Ptr};
  DFSanLoadStoreCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
                        /*isVarArg=*/false);
  Type *DFSanMemTransferCallbackArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
  DFSanMemTransferCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
                        /*isVarArg=*/false);

  // Branch weights marking the slow (runtime-call) path as cold.
  ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
  return true;
}
1014 
1015 bool DataFlowSanitizer::isInstrumented(const Function *F) {
1016   return !ABIList.isIn(*F, "uninstrumented");
1017 }
1018 
1019 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
1020   return !ABIList.isIn(*GA, "uninstrumented");
1021 }
1022 
1023 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
1024   return ClArgsABI ? IA_Args : IA_TLS;
1025 }
1026 
1027 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
1028   if (ABIList.isIn(*F, "functional"))
1029     return WK_Functional;
1030   if (ABIList.isIn(*F, "discard"))
1031     return WK_Discard;
1032   if (ABIList.isIn(*F, "custom"))
1033     return WK_Custom;
1034 
1035   return WK_Warning;
1036 }
1037 
1038 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
1039   std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
1040   GV->setName(Prefix + GVName);
1041 
1042   // Try to change the name of the function in module inline asm.  We only do
1043   // this for specific asm directives, currently only ".symver", to try to avoid
1044   // corrupting asm which happens to contain the symbol name as a substring.
1045   // Note that the substitution for .symver assumes that the versioned symbol
1046   // also has an instrumented name.
1047   std::string Asm = GV->getParent()->getModuleInlineAsm();
1048   std::string SearchStr = ".symver " + GVName + ",";
1049   size_t Pos = Asm.find(SearchStr);
1050   if (Pos != std::string::npos) {
1051     Asm.replace(Pos, SearchStr.size(),
1052                 ".symver " + Prefix + GVName + "," + Prefix);
1053     GV->getParent()->setModuleInlineAsm(Asm);
1054   }
1055 }
1056 
// Creates a function named NewFName, of type NewFT and linkage NewFLink, that
// simply forwards its (leading) arguments to F and returns F's result.  For
// variadic F the body instead calls __dfsan_vararg_wrapper with F's name and
// ends in unreachable, since the variadic arguments cannot be forwarded.
Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
                                        GlobalValue::LinkageTypes NewFLink,
                                        FunctionType *NewFT) {
  FunctionType *FT = F->getFunctionType();
  Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
                                    NewFName, F->getParent());
  NewF->copyAttributesFrom(F);
  // Drop any return attributes that are invalid for NewFT's return type.
  NewF->removeAttributes(
      AttributeList::ReturnIndex,
      AttributeFuncs::typeIncompatible(NewFT->getReturnType()));

  BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
  if (F->isVarArg()) {
    NewF->removeAttributes(AttributeList::FunctionIndex,
                           AttrBuilder().addAttribute("split-stack"));
    // Report the unsupported vararg call at runtime, then trap.
    CallInst::Create(DFSanVarargWrapperFn,
                     IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
                     BB);
    new UnreachableInst(*Ctx, BB);
  } else {
    // Forward the first FT->getNumParams() arguments of NewF to F.  NewFT may
    // declare additional trailing (shadow) parameters, which are ignored here.
    auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
    std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());

    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}
1090 
// Returns (building on first use) the trampoline FName for functions of type
// FT.  The trampoline's first argument is the target function pointer; it
// calls the target with the original value arguments, seeding the callee's
// shadow (and origin) state from the trampoline's trailing parameters, and
// writes the return shadow/origin through the trailing out-pointers.
Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
                                                          StringRef FName) {
  FunctionType *FTT = getTrampolineFunctionType(FT);
  FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
  Function *F = dyn_cast<Function>(C.getCallee());
  // Only build the body once; a declaration means we have not done so yet.
  if (F && F->isDeclaration()) {
    F->setLinkage(GlobalValue::LinkOnceODRLinkage);
    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    std::vector<Value *> Args;
    // Skip the target-function-pointer parameter; collect the value
    // parameters.  AI is left pointing at the first shadow parameter.
    Function::arg_iterator AI = F->arg_begin() + 1;
    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
      Args.push_back(&*AI);
    // Call through the function pointer (the trampoline's first argument).
    CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
    Type *RetType = FT->getReturnType();
    ReturnInst *RI = RetType->isVoidTy() ? ReturnInst::Create(*Ctx, BB)
                                         : ReturnInst::Create(*Ctx, CI, BB);

    // F is called by a wrapped custom function with primitive shadows. So
    // its arguments and return value need conversion.
    DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
    // Walk value parameters and shadow parameters in lockstep, expanding each
    // primitive shadow to the value's shadow type.
    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;
    ++ValAI;
    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {
      Value *Shadow =
          DFSF.expandFromPrimitiveShadow(ValAI->getType(), &*ShadowAI, CI);
      DFSF.ValShadowMap[&*ValAI] = Shadow;
    }
    // After the loop ShadowAI points past the shadows: at the return-shadow
    // out-pointer (if any), otherwise at the origin parameters.
    Function::arg_iterator RetShadowAI = ShadowAI;
    const bool ShouldTrackOrigins = shouldTrackOrigins();
    if (ShouldTrackOrigins) {
      ValAI = F->arg_begin();
      ++ValAI;
      Function::arg_iterator OriginAI = ShadowAI;
      // Skip the return-shadow out-pointer to reach the first origin.
      if (!RetType->isVoidTy())
        ++OriginAI;
      for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++OriginAI, --N) {
        DFSF.ValOriginMap[&*ValAI] = &*OriginAI;
      }
    }
    // Instrument the call itself so the callee's shadow state is threaded.
    DFSanVisitor(DFSF).visitCallInst(*CI);
    if (!RetType->isVoidTy()) {
      // Store the collapsed return shadow (and origin, which is the very last
      // parameter) through the out-pointers before returning.
      Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(
          DFSF.getShadow(RI->getReturnValue()), RI);
      new StoreInst(PrimitiveShadow, &*RetShadowAI, RI);
      if (ShouldTrackOrigins) {
        Value *Origin = DFSF.getOrigin(RI->getReturnValue());
        new StoreInst(Origin, &*std::prev(F->arg_end()), RI);
      }
    }
  }

  return cast<Constant>(C.getCallee());
}
1144 
// Initialize DataFlowSanitizer runtime functions and declare them in the
// module.  Also records all runtime entry points (including the callback
// functions) in DFSanRuntimeFunctions so they can be excluded from
// instrumentation.  NOTE(review): this reads DFSanLoadCallbackFn et al., so
// initializeCallbackFunctions must have run first — runImpl calls them in
// that order; confirm if call sites change.
void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
  {
    // __dfsan_union: pure shadow combination; zero-extended i16-ish args.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanUnionFn =
        Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    // dfsan_union: same contract under the public (checked) name.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanCheckedUnionFn =
        Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    // __dfsan_union_load: reads shadow memory, hence ReadOnly not ReadNone.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFn =
        Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
  }
  {
    // Fast-16-labels variant of the shadow load.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction(
        "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
  }
  {
    // Combined label+origin load used when origin tracking is enabled.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanLoadLabelAndOriginFn = Mod->getOrInsertFunction(
        "__dfsan_load_label_and_origin", DFSanLoadLabelAndOriginFnTy, AL);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanSetLabelFn =
        Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);
  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanChainOriginFn = Mod->getOrInsertFunction("__dfsan_chain_origin",
                                                  DFSanChainOriginFnTy, AL);
  }
  DFSanMemOriginTransferFn = Mod->getOrInsertFunction(
      "__dfsan_mem_origin_transfer", DFSanMemOriginTransferFnTy);

  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
    DFSanMaybeStoreOriginFn = Mod->getOrInsertFunction(
        "__dfsan_maybe_store_origin", DFSanMaybeStoreOriginFnTy, AL);
  }

  // Record every runtime entry point (stripping any bitcast the declaration
  // may have acquired) so instrumentation skips these functions.
  DFSanRuntimeFunctions.insert(DFSanUnionFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanCheckedUnionFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnionLoadFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanLoadLabelAndOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnimplementedFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanSetLabelFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanNonzeroLabelFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanVarargWrapperFn.getCallee()->stripPointerCasts());
  // The four callback FunctionCallees below are set up by
  // initializeCallbackFunctions.
  DFSanRuntimeFunctions.insert(
      DFSanLoadCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanStoreCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanCmpCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanChainOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemOriginTransferFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMaybeStoreOriginFn.getCallee()->stripPointerCasts());
}
1270 
1271 // Initializes event callback functions and declare them in the module
1272 void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
1273   DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
1274                                                  DFSanLoadStoreCallbackFnTy);
1275   DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback",
1276                                                   DFSanLoadStoreCallbackFnTy);
1277   DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
1278       "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
1279   DFSanCmpCallbackFn =
1280       Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
1281 }
1282 
1283 void DataFlowSanitizer::injectMetadataGlobals(Module &M) {
1284   // These variables can be used:
1285   // - by the runtime (to discover what the shadow width was, during
1286   //   compilation)
1287   // - in testing (to avoid hardcoding the shadow width and type but instead
1288   //   extract them by pattern matching)
1289   Type *IntTy = Type::getInt32Ty(*Ctx);
1290   (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bits", IntTy, [&] {
1291     return new GlobalVariable(
1292         M, IntTy, /*isConstant=*/true, GlobalValue::WeakODRLinkage,
1293         ConstantInt::get(IntTy, ShadowWidthBits), "__dfsan_shadow_width_bits");
1294   });
1295   (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bytes", IntTy, [&] {
1296     return new GlobalVariable(M, IntTy, /*isConstant=*/true,
1297                               GlobalValue::WeakODRLinkage,
1298                               ConstantInt::get(IntTy, ShadowWidthBytes),
1299                               "__dfsan_shadow_width_bytes");
1300   });
1301 }
1302 
1303 bool DataFlowSanitizer::runImpl(Module &M) {
1304   init(M);
1305 
1306   if (ABIList.isIn(M, "skip"))
1307     return false;
1308 
1309   const unsigned InitialGlobalSize = M.global_size();
1310   const unsigned InitialModuleSize = M.size();
1311 
1312   bool Changed = false;
1313 
1314   auto GetOrInsertGlobal = [this, &Changed](StringRef Name,
1315                                             Type *Ty) -> Constant * {
1316     Constant *C = Mod->getOrInsertGlobal(Name, Ty);
1317     if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {
1318       Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
1319       G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
1320     }
1321     return C;
1322   };
1323 
1324   // These globals must be kept in sync with the ones in dfsan.cpp.
1325   ArgTLS =
1326       GetOrInsertGlobal("__dfsan_arg_tls",
1327                         ArrayType::get(Type::getInt64Ty(*Ctx), ArgTLSSize / 8));
1328   RetvalTLS = GetOrInsertGlobal(
1329       "__dfsan_retval_tls",
1330       ArrayType::get(Type::getInt64Ty(*Ctx), RetvalTLSSize / 8));
1331   ArgOriginTLSTy = ArrayType::get(OriginTy, NumOfElementsInArgOrgTLS);
1332   ArgOriginTLS = GetOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy);
1333   RetvalOriginTLS = GetOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy);
1334 
1335   (void)Mod->getOrInsertGlobal("__dfsan_track_origins", OriginTy, [&] {
1336     Changed = true;
1337     return new GlobalVariable(
1338         M, OriginTy, true, GlobalValue::WeakODRLinkage,
1339         ConstantInt::getSigned(OriginTy, shouldTrackOrigins()),
1340         "__dfsan_track_origins");
1341   });
1342 
1343   injectMetadataGlobals(M);
1344 
1345   ExternalShadowMask =
1346       Mod->getOrInsertGlobal(DFSanExternShadowPtrMask, IntptrTy);
1347 
1348   initializeCallbackFunctions(M);
1349   initializeRuntimeFunctions(M);
1350 
1351   std::vector<Function *> FnsToInstrument;
1352   SmallPtrSet<Function *, 2> FnsWithNativeABI;
1353   for (Function &F : M)
1354     if (!F.isIntrinsic() && !DFSanRuntimeFunctions.contains(&F))
1355       FnsToInstrument.push_back(&F);
1356 
1357   // Give function aliases prefixes when necessary, and build wrappers where the
1358   // instrumentedness is inconsistent.
1359   for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end();
1360        AI != AE;) {
1361     GlobalAlias *GA = &*AI;
1362     ++AI;
1363     // Don't stop on weak.  We assume people aren't playing games with the
1364     // instrumentedness of overridden weak aliases.
1365     auto *F = dyn_cast<Function>(GA->getBaseObject());
1366     if (!F)
1367       continue;
1368 
1369     bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
1370     if (GAInst && FInst) {
1371       addGlobalNamePrefix(GA);
1372     } else if (GAInst != FInst) {
1373       // Non-instrumented alias of an instrumented function, or vice versa.
1374       // Replace the alias with a native-ABI wrapper of the aliasee.  The pass
1375       // below will take care of instrumenting it.
1376       Function *NewF =
1377           buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
1378       GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
1379       NewF->takeName(GA);
1380       GA->eraseFromParent();
1381       FnsToInstrument.push_back(NewF);
1382     }
1383   }
1384 
1385   ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
1386       .addAttribute(Attribute::ReadNone);
1387 
1388   // First, change the ABI of every function in the module.  ABI-listed
1389   // functions keep their original ABI and get a wrapper function.
1390   for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(),
1391                                          FE = FnsToInstrument.end();
1392        FI != FE; ++FI) {
1393     Function &F = **FI;
1394     FunctionType *FT = F.getFunctionType();
1395 
1396     bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
1397                               FT->getReturnType()->isVoidTy());
1398 
1399     if (isInstrumented(&F)) {
1400       // Instrumented functions get a 'dfs$' prefix.  This allows us to more
1401       // easily identify cases of mismatching ABIs.
1402       if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
1403         FunctionType *NewFT = getArgsFunctionType(FT);
1404         Function *NewF = Function::Create(NewFT, F.getLinkage(),
1405                                           F.getAddressSpace(), "", &M);
1406         NewF->copyAttributesFrom(&F);
1407         NewF->removeAttributes(
1408             AttributeList::ReturnIndex,
1409             AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
1410         for (Function::arg_iterator FArg = F.arg_begin(),
1411                                     NewFArg = NewF->arg_begin(),
1412                                     FArgEnd = F.arg_end();
1413              FArg != FArgEnd; ++FArg, ++NewFArg) {
1414           FArg->replaceAllUsesWith(&*NewFArg);
1415         }
1416         NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
1417 
1418         for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
1419              UI != UE;) {
1420           BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
1421           ++UI;
1422           if (BA) {
1423             BA->replaceAllUsesWith(
1424                 BlockAddress::get(NewF, BA->getBasicBlock()));
1425             delete BA;
1426           }
1427         }
1428         F.replaceAllUsesWith(
1429             ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
1430         NewF->takeName(&F);
1431         F.eraseFromParent();
1432         *FI = NewF;
1433         addGlobalNamePrefix(NewF);
1434       } else {
1435         addGlobalNamePrefix(&F);
1436       }
1437     } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
1438       // Build a wrapper function for F.  The wrapper simply calls F, and is
1439       // added to FnsToInstrument so that any instrumentation according to its
1440       // WrapperKind is done in the second pass below.
1441       FunctionType *NewFT =
1442           getInstrumentedABI() == IA_Args ? getArgsFunctionType(FT) : FT;
1443 
1444       // If the function being wrapped has local linkage, then preserve the
1445       // function's linkage in the wrapper function.
1446       GlobalValue::LinkageTypes WrapperLinkage =
1447           F.hasLocalLinkage() ? F.getLinkage()
1448                               : GlobalValue::LinkOnceODRLinkage;
1449 
1450       Function *NewF = buildWrapperFunction(
1451           &F,
1452           (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
1453               std::string(F.getName()),
1454           WrapperLinkage, NewFT);
1455       if (getInstrumentedABI() == IA_TLS)
1456         NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
1457 
1458       Value *WrappedFnCst =
1459           ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
1460       F.replaceAllUsesWith(WrappedFnCst);
1461 
1462       UnwrappedFnMap[WrappedFnCst] = &F;
1463       *FI = NewF;
1464 
1465       if (!F.isDeclaration()) {
1466         // This function is probably defining an interposition of an
1467         // uninstrumented function and hence needs to keep the original ABI.
1468         // But any functions it may call need to use the instrumented ABI, so
1469         // we instrument it in a mode which preserves the original ABI.
1470         FnsWithNativeABI.insert(&F);
1471 
1472         // This code needs to rebuild the iterators, as they may be invalidated
1473         // by the push_back, taking care that the new range does not include
1474         // any functions added by this code.
1475         size_t N = FI - FnsToInstrument.begin(),
1476                Count = FE - FnsToInstrument.begin();
1477         FnsToInstrument.push_back(&F);
1478         FI = FnsToInstrument.begin() + N;
1479         FE = FnsToInstrument.begin() + Count;
1480       }
1481       // Hopefully, nobody will try to indirectly call a vararg
1482       // function... yet.
1483     } else if (FT->isVarArg()) {
1484       UnwrappedFnMap[&F] = &F;
1485       *FI = nullptr;
1486     }
1487   }
1488 
1489   for (Function *F : FnsToInstrument) {
1490     if (!F || F->isDeclaration())
1491       continue;
1492 
1493     removeUnreachableBlocks(*F);
1494 
1495     DFSanFunction DFSF(*this, F, FnsWithNativeABI.count(F));
1496 
1497     // DFSanVisitor may create new basic blocks, which confuses df_iterator.
1498     // Build a copy of the list before iterating over it.
1499     SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock()));
1500 
1501     for (BasicBlock *BB : BBList) {
1502       Instruction *Inst = &BB->front();
1503       while (true) {
1504         // DFSanVisitor may split the current basic block, changing the current
1505         // instruction's next pointer and moving the next instruction to the
1506         // tail block from which we should continue.
1507         Instruction *Next = Inst->getNextNode();
1508         // DFSanVisitor may delete Inst, so keep track of whether it was a
1509         // terminator.
1510         bool IsTerminator = Inst->isTerminator();
1511         if (!DFSF.SkipInsts.count(Inst))
1512           DFSanVisitor(DFSF).visit(Inst);
1513         if (IsTerminator)
1514           break;
1515         Inst = Next;
1516       }
1517     }
1518 
1519     // We will not necessarily be able to compute the shadow for every phi node
1520     // until we have visited every block.  Therefore, the code that handles phi
1521     // nodes adds them to the PHIFixups list so that they can be properly
1522     // handled here.
1523     for (auto PHIFixup : DFSF.PHIFixups) {
1524       PHINode *PN, *ShadowPN;
1525       std::tie(PN, ShadowPN) = PHIFixup;
1526       for (unsigned Val = 0, N = PN->getNumIncomingValues(); Val < N; ++Val) {
1527         ShadowPN->setIncomingValue(Val,
1528                                    DFSF.getShadow(PN->getIncomingValue(Val)));
1529       }
1530     }
1531 
1532     // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
1533     // places (i.e. instructions in basic blocks we haven't even begun visiting
1534     // yet).  To make our life easier, do this work in a pass after the main
1535     // instrumentation.
1536     if (ClDebugNonzeroLabels) {
1537       for (Value *V : DFSF.NonZeroChecks) {
1538         Instruction *Pos;
1539         if (Instruction *I = dyn_cast<Instruction>(V))
1540           Pos = I->getNextNode();
1541         else
1542           Pos = &DFSF.F->getEntryBlock().front();
1543         while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
1544           Pos = Pos->getNextNode();
1545         IRBuilder<> IRB(Pos);
1546         Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
1547         Value *Ne =
1548             IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);
1549         BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1550             Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
1551         IRBuilder<> ThenIRB(BI);
1552         ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
1553       }
1554     }
1555   }
1556 
1557   return Changed || !FnsToInstrument.empty() ||
1558          M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
1559 }
1560 
1561 Value *DFSanFunction::getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB) {
1562   Value *Base = IRB.CreatePointerCast(DFS.ArgTLS, DFS.IntptrTy);
1563   if (ArgOffset)
1564     Base = IRB.CreateAdd(Base, ConstantInt::get(DFS.IntptrTy, ArgOffset));
1565   return IRB.CreateIntToPtr(Base, PointerType::get(DFS.getShadowTy(T), 0),
1566                             "_dfsarg");
1567 }
1568 
1569 Value *DFSanFunction::getRetvalTLS(Type *T, IRBuilder<> &IRB) {
1570   return IRB.CreatePointerCast(
1571       DFS.RetvalTLS, PointerType::get(DFS.getShadowTy(T), 0), "_dfsret");
1572 }
1573 
// Returns the TLS slot holding the origin of the function's return value.
Value *DFSanFunction::getRetvalOriginTLS() { return DFS.RetvalOriginTLS; }
1575 
1576 Value *DFSanFunction::getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB) {
1577   return IRB.CreateConstGEP2_64(DFS.ArgOriginTLSTy, DFS.ArgOriginTLS, 0, ArgNo,
1578                                 "_dfsarg_o");
1579 }
1580 
// Returns the origin id Value for V, creating and caching a load from the
// argument-origin TLS array for formal arguments on first request. Must only
// be called when origin tracking is enabled.
Value *DFSanFunction::getOrigin(Value *V) {
  assert(DFS.shouldTrackOrigins());
  // Values other than arguments and instructions (e.g. constants) never
  // carry a nonzero origin.
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.ZeroOrigin;
  // Note: operator[] creates (and caches) the entry if it is absent; Origin
  // aliases the cached slot so assignments below populate the cache.
  Value *&Origin = ValOriginMap[V];
  if (!Origin) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      // Native-ABI functions receive no origin data for their arguments.
      if (IsNativeABI)
        return DFS.ZeroOrigin;
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
          // Load the argument's origin from its TLS slot at the very start of
          // the entry block, before other instrumentation runs.
          Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
          IRBuilder<> IRB(ArgOriginTLSPos);
          Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
          Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr);
        } else {
          // Overflow: arguments beyond the TLS array get a zero origin.
          Origin = DFS.ZeroOrigin;
        }
        break;
      }
      case DataFlowSanitizer::IA_Args: {
        // The args ABI does not pass origin values.
        Origin = DFS.ZeroOrigin;
        break;
      }
      }
    } else {
      // Instructions not assigned an origin by the visitor default to zero.
      Origin = DFS.ZeroOrigin;
    }
  }
  return Origin;
}
1614 
// Records Origin as the origin value of instruction I. A no-op when origin
// tracking is disabled; each instruction's origin may be set at most once.
void DFSanFunction::setOrigin(Instruction *I, Value *Origin) {
  if (!DFS.shouldTrackOrigins())
    return;
  assert(!ValOriginMap.count(I));
  assert(Origin->getType() == DFS.OriginTy);
  ValOriginMap[I] = Origin;
}
1622 
// Computes the shadow of formal argument A by loading it from the ArgTLS
// buffer, in which the shadows of the function's sized arguments are laid
// out consecutively, each aligned to ShadowTLSAlignment. Arguments whose
// shadow would fall beyond ArgTLSSize bytes get the zero shadow.
Value *DFSanFunction::getShadowForTLSArgument(Argument *A) {
  unsigned ArgOffset = 0;
  const DataLayout &DL = F->getParent()->getDataLayout();
  for (auto &FArg : F->args()) {
    // Unsized arguments occupy no TLS space; if A itself is unsized it falls
    // through to the zero shadow below.
    if (!FArg.getType()->isSized()) {
      if (A == &FArg)
        break;
      continue;
    }

    unsigned Size = DL.getTypeAllocSize(DFS.getShadowTy(&FArg));
    if (A != &FArg) {
      // Skip over the (aligned) shadow slot of this preceding argument.
      ArgOffset += alignTo(Size, ShadowTLSAlignment);
      if (ArgOffset > ArgTLSSize)
        break; // ArgTLS overflows, uses a zero shadow.
      continue;
    }

    if (ArgOffset + Size > ArgTLSSize)
      break; // ArgTLS overflows, uses a zero shadow.

    // Load the shadow at the start of the entry block so it dominates all
    // uses of the argument.
    Instruction *ArgTLSPos = &*F->getEntryBlock().begin();
    IRBuilder<> IRB(ArgTLSPos);
    Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);
    return IRB.CreateAlignedLoad(DFS.getShadowTy(&FArg), ArgShadowPtr,
                                 ShadowTLSAlignment);
  }

  return DFS.getZeroShadow(A);
}
1653 
// Returns the shadow Value for V, computing and caching it on first request.
// Only arguments and instructions can carry nonzero shadows; anything else
// (e.g. constants) maps to the zero shadow.
Value *DFSanFunction::getShadow(Value *V) {
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.getZeroShadow(V);
  // Note: operator[] creates (and caches) the entry if it is absent; Shadow
  // aliases the cached slot so assignments below populate the cache.
  Value *&Shadow = ValShadowMap[V];
  if (!Shadow) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      // Native-ABI functions receive no shadow data for their arguments.
      if (IsNativeABI)
        return DFS.getZeroShadow(V);
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        Shadow = getShadowForTLSArgument(A);
        break;
      }
      case DataFlowSanitizer::IA_Args: {
        // In the args ABI shadow arguments follow the original arguments, so
        // the shadow of argument i is the argument at index i + arg_size()/2.
        unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
        Function::arg_iterator Arg = F->arg_begin();
        std::advance(Arg, ArgIdx);
        Shadow = &*Arg;
        assert(Shadow->getType() == DFS.PrimitiveShadowTy);
        break;
      }
      }
      // Argument shadows participate in -dfsan-debug-nonzero-labels checks.
      NonZeroChecks.push_back(Shadow);
    } else {
      // Instructions not assigned a shadow by the visitor default to zero.
      Shadow = DFS.getZeroShadow(V);
    }
  }
  return Shadow;
}
1683 
// Records Shadow as the shadow value of instruction I; each instruction's
// shadow may be set at most once.
void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
  assert(!ValShadowMap.count(I));
  // Aggregate (non-primitive) shadow types are only permitted when
  // field/index tracking is enabled.
  assert(DFS.shouldTrackFieldsAndIndices() ||
         Shadow->getType() == DFS.PrimitiveShadowTy);
  ValShadowMap[I] = Shadow;
}
1690 
1691 Value *DataFlowSanitizer::getShadowOffset(Value *Addr, IRBuilder<> &IRB) {
1692   // Returns Addr & shadow_mask
1693   assert(Addr != RetvalTLS && "Reinstrumenting?");
1694   Value *ShadowPtrMaskValue;
1695   if (DFSanRuntimeShadowMask)
1696     ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
1697   else
1698     ShadowPtrMaskValue = ShadowPtrMask;
1699   return IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
1700                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy));
1701 }
1702 
// Returns the shadow address and (when origin tracking is enabled) the
// origin address for application address Addr, inserting the computation
// before Pos. The second element is nullptr when origins are not tracked.
std::pair<Value *, Value *>
DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
                                          Instruction *Pos) {
  // Returns ((Addr & shadow_mask) + origin_base) & ~4UL
  IRBuilder<> IRB(Pos);
  Value *ShadowOffset = getShadowOffset(Addr, IRB);
  // Scale the offset by ShadowPtrMul, since each application byte is backed
  // by multiple shadow bytes.
  Value *ShadowPtr = IRB.CreateIntToPtr(
      IRB.CreateMul(ShadowOffset, ShadowPtrMul), PrimitiveShadowPtrTy);
  Value *OriginPtr = nullptr;
  if (shouldTrackOrigins()) {
    Value *OriginLong = IRB.CreateAdd(ShadowOffset, OriginBase);
    const Align Alignment = llvm::assumeAligned(InstAlignment.value());
    // When alignment is >= 4, Addr must be aligned to 4, otherwise it is UB.
    // So Mask is unnecessary.
    if (Alignment < MinOriginAlignment) {
      // Round the origin address down to the origin slot boundary.
      uint64_t Mask = MinOriginAlignment.value() - 1;
      OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(IntptrTy, ~Mask));
    }
    OriginPtr = IRB.CreateIntToPtr(OriginLong, OriginPtrTy);
  }
  return {ShadowPtr, OriginPtr};
}
1725 
1726 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
1727   // Returns (Addr & shadow_mask) x 2
1728   IRBuilder<> IRB(Pos);
1729   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1730   return IRB.CreateIntToPtr(IRB.CreateMul(ShadowOffset, ShadowPtrMul),
1731                             PrimitiveShadowPtrTy);
1732 }
1733 
1734 Value *DFSanFunction::combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
1735                                                 Instruction *Pos) {
1736   Value *PrimitiveValue = combineShadows(V1, V2, Pos);
1737   return expandFromPrimitiveShadow(T, PrimitiveValue, Pos);
1738 }
1739 
// Generates IR to compute the union of the two given shadows, inserting it
// before Pos. The combined value is with primitive type.
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
  // Union with a zero shadow (or with itself) is a no-op: just collapse the
  // surviving operand to primitive type.
  if (DFS.isZeroShadow(V1))
    return collapseToPrimitiveShadow(V2, Pos);
  if (DFS.isZeroShadow(V2))
    return collapseToPrimitiveShadow(V1, Pos);
  if (V1 == V2)
    return collapseToPrimitiveShadow(V1, Pos);

  // If one shadow is already known (via ShadowElements) to cover all the
  // elements of the other, the union is just the covering shadow.
  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return collapseToPrimitiveShadow(V1, Pos);
    }
    if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                      V1Elems->second.begin(), V1Elems->second.end())) {
      return collapseToPrimitiveShadow(V2, Pos);
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return collapseToPrimitiveShadow(V1, Pos);
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return collapseToPrimitiveShadow(V2, Pos);
  }

  // Canonicalize operand order so (V1, V2) and (V2, V1) share one cache
  // entry, and reuse a previously computed union if it dominates Pos.
  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedShadow &CCS = CachedShadows[Key];
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  // Converts inputs shadows to shadows with primitive types.
  Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
  Value *PV2 = collapseToPrimitiveShadow(V2, Pos);

  IRBuilder<> IRB(Pos);
  if (ClFast16Labels) {
    // Fast16 labels are bit vectors, so union is a plain OR.
    CCS.Block = Pos->getParent();
    CCS.Shadow = IRB.CreateOr(PV1, PV2);
  } else if (AvoidNewBlocks) {
    // Call the checked union runtime function unconditionally rather than
    // splitting the current block.
    CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {PV1, PV2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    CCS.Block = Pos->getParent();
    CCS.Shadow = Call;
  } else {
    // Branch on PV1 != PV2 and call the union function only on the (cold)
    // unequal path; when the shadows are equal the union is simply PV1.
    BasicBlock *Head = Pos->getParent();
    Value *Ne = IRB.CreateICmpNE(PV1, PV2);
    BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
        Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
    IRBuilder<> ThenIRB(BI);
    CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {PV1, PV2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    // Merge the two paths with a phi at the top of the tail block.
    BasicBlock *Tail = BI->getSuccessor(0);
    PHINode *Phi =
        PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front());
    Phi->addIncoming(Call, Call->getParent());
    Phi->addIncoming(PV1, Head);

    CCS.Block = Tail;
    CCS.Shadow = Phi;
  }

  // Record the set of leaf shadows covered by the new union so later calls
  // can elide redundant unions via the subset checks above.
  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}
1828 
1829 // A convenience function which folds the shadows of each of the operands
1830 // of the provided instruction Inst, inserting the IR before Inst.  Returns
1831 // the computed union Value.
1832 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1833   if (Inst->getNumOperands() == 0)
1834     return DFS.getZeroShadow(Inst);
1835 
1836   Value *Shadow = getShadow(Inst->getOperand(0));
1837   for (unsigned I = 1, N = Inst->getNumOperands(); I < N; ++I)
1838     Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(I)), Inst);
1839 
1840   return expandFromPrimitiveShadow(Inst->getType(), Shadow, Inst);
1841 }
1842 
// Default instrumentation for an instruction: its shadow is the union of its
// operands' shadows, and its origin is derived from its operands' origins.
void DFSanVisitor::visitInstOperands(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
  visitInstOperandOrigins(I);
}
1848 
// Combines the origins of values with the given (parallel) shadows into one
// origin via a chain of selects inserted before Pos: at run time the result
// is the origin of the last operand whose shadow compares not-equal to Zero.
// Operands whose origin is a statically-null constant are skipped. When Zero
// is not supplied it defaults to the zero primitive shadow.
Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
                                     const std::vector<Value *> &Origins,
                                     Instruction *Pos, ConstantInt *Zero) {
  assert(Shadows.size() == Origins.size());
  size_t Size = Origins.size();
  if (Size == 0)
    return DFS.ZeroOrigin;
  Value *Origin = nullptr;
  if (!Zero)
    Zero = DFS.ZeroPrimitiveShadow;
  for (size_t I = 0; I != Size; ++I) {
    Value *OpOrigin = Origins[I];
    // A statically-zero origin can never be selected; skip it entirely.
    Constant *ConstOpOrigin = dyn_cast<Constant>(OpOrigin);
    if (ConstOpOrigin && ConstOpOrigin->isNullValue())
      continue;
    // The first surviving candidate needs no select.
    if (!Origin) {
      Origin = OpOrigin;
      continue;
    }
    // Prefer OpOrigin over the accumulated Origin when its shadow is nonzero.
    Value *OpShadow = Shadows[I];
    Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
    IRBuilder<> IRB(Pos);
    Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
    Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
  }
  return Origin ? Origin : DFS.ZeroOrigin;
}
1876 
1877 Value *DFSanFunction::combineOperandOrigins(Instruction *Inst) {
1878   size_t Size = Inst->getNumOperands();
1879   std::vector<Value *> Shadows(Size);
1880   std::vector<Value *> Origins(Size);
1881   for (unsigned I = 0; I != Size; ++I) {
1882     Shadows[I] = getShadow(Inst->getOperand(I));
1883     Origins[I] = getOrigin(Inst->getOperand(I));
1884   }
1885   return combineOrigins(Shadows, Origins, Inst);
1886 }
1887 
1888 void DFSanVisitor::visitInstOperandOrigins(Instruction &I) {
1889   if (!DFSF.DFS.shouldTrackOrigins())
1890     return;
1891   Value *CombinedOrigin = DFSF.combineOperandOrigins(&I);
1892   DFSF.setOrigin(&I, CombinedOrigin);
1893 }
1894 
1895 Align DFSanFunction::getShadowAlign(Align InstAlignment) {
1896   const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);
1897   return Align(Alignment.value() * DFS.ShadowWidthBytes);
1898 }
1899 
1900 Align DFSanFunction::getOriginAlign(Align InstAlignment) {
1901   const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1902   return Align(std::max(MinOriginAlignment, Alignment));
1903 }
1904 
1905 bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
1906                                                   Align InstAlignment) {
1907   assert(Size != 0);
1908   // * if Size == 1, it is sufficient to load its origin aligned at 4.
1909   // * if Size == 2, we assume most cases Addr % 2 == 0, so it is sufficient to
1910   //   load its origin aligned at 4. If not, although origins may be lost, it
1911   //   should not happen very often.
1912   // * if align >= 4, Addr must be aligned to 4, otherwise it is UB. When
1913   //   Size % 4 == 0, it is more efficient to load origins without callbacks.
1914   // * Otherwise we use __dfsan_load_label_and_origin.
1915   // This should ensure that common cases run efficiently.
1916   if (Size <= 2)
1917     return false;
1918 
1919   const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1920   if (Alignment >= MinOriginAlignment &&
1921       Size % (64 / DFS.ShadowWidthBits) == 0)
1922     return false;
1923 
1924   return true;
1925 }
1926 
// Loads Size shadow slots starting at ShadowAddr as 64-bit words, ORs the
// words together (fast16 labels are bit vectors, so OR is union), then folds
// the combined word down to one primitive shadow. When origin tracking is
// enabled, per-word origins are combined as well: FirstOrigin belongs to the
// first word, and subsequent origins are loaded by stepping OriginAddr.
std::pair<Value *, Value *> DFSanFunction::loadFast16ShadowFast(
    Value *ShadowAddr, Value *OriginAddr, uint64_t Size, Align ShadowAlign,
    Align OriginAlign, Value *FirstOrigin, Instruction *Pos) {
  // First OR all the WideShadows, then OR individual shadows within the
  // combined WideShadow. This is fewer instructions than ORing shadows
  // individually.
  const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();
  std::vector<Value *> Shadows;
  std::vector<Value *> Origins;
  IRBuilder<> IRB(Pos);
  Value *WideAddr =
      IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
  Value *CombinedWideShadow =
      IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
  if (ShouldTrackOrigins) {
    Shadows.push_back(CombinedWideShadow);
    Origins.push_back(FirstOrigin);
  }
  // Each 64-bit load covers 64 / ShadowWidthBits shadow slots.
  for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
       Ofs += 64 / DFS.ShadowWidthBits) {
    WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                             ConstantInt::get(DFS.IntptrTy, 1));
    Value *NextWideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
    if (ShouldTrackOrigins) {
      Shadows.push_back(NextWideShadow);
      OriginAddr = IRB.CreateGEP(DFS.OriginTy, OriginAddr,
                                 ConstantInt::get(DFS.IntptrTy, 1));
      Origins.push_back(
          IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign));
    }
  }
  // Fold the wide word onto itself (OR with its upper half, then quarter,
  // ...) so the union of all slots ends up in the low ShadowWidthBits bits.
  for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
    Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
    CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
  }
  return {IRB.CreateTrunc(CombinedWideShadow, DFS.PrimitiveShadowTy),
          ShouldTrackOrigins
              ? combineOrigins(Shadows, Origins, Pos,
                               ConstantInt::getSigned(IRB.getInt64Ty(), 0))
              : DFS.ZeroOrigin};
}
1970 
// Loads and unions Size legacy shadow slots starting at ShadowAddr,
// optimizing for the case where all slots hold the same label.
Value *DFSanFunction::loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
                                           Align ShadowAlign,
                                           Instruction *Pos) {
  // Fast path for the common case where each byte has identical shadow: load
  // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
  // shadow is non-equal.
  BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
  IRBuilder<> FallbackIRB(FallbackBB);
  CallInst *FallbackCall = FallbackIRB.CreateCall(
      DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);

  // Compare each of the shadows stored in the loaded 64 bits to each other,
  // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
  IRBuilder<> IRB(Pos);
  Value *WideAddr =
      IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
  Value *WideShadow =
      IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
  // When all slots are equal, the union is just the first slot.
  Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.PrimitiveShadowTy);
  Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
  Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
  Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
  Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);

  // Split off the tail block at Pos and keep the dominator tree consistent:
  // Head's old children are now dominated by Tail.
  BasicBlock *Head = Pos->getParent();
  BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());

  if (DomTreeNode *OldNode = DT.getNode(Head)) {
    std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());

    DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
    for (auto *Child : Children)
      DT.changeImmediateDominator(Child, NewNode);
  }

  // In the following code LastBr will refer to the previous basic block's
  // conditional branch instruction, whose true successor is fixed up to point
  // to the next block during the loop below or to the tail after the final
  // iteration.
  BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
  ReplaceInstWithInst(Head->getTerminator(), LastBr);
  DT.addNewBlock(FallbackBB, Head);

  // Compare every remaining 64-bit word against the first one, branching to
  // the fallback on the first mismatch.
  for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
       Ofs += 64 / DFS.ShadowWidthBits) {
    BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
    DT.addNewBlock(NextBB, LastBr->getParent());
    IRBuilder<> NextIRB(NextBB);
    WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                                 ConstantInt::get(DFS.IntptrTy, 1));
    Value *NextWideShadow =
        NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(), WideAddr, ShadowAlign);
    ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
    LastBr->setSuccessor(0, NextBB);
    LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
  }

  LastBr->setSuccessor(0, Tail);
  FallbackIRB.CreateBr(Tail);
  // Merge the fast-path shadow with the fallback call's result.
  PHINode *Shadow =
      PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front());
  Shadow->addIncoming(FallbackCall, FallbackBB);
  Shadow->addIncoming(TruncShadow, LastBr->getParent());
  return Shadow;
}
2037 
// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
// Addr has alignment Align, and take the union of each of those shadows. The
// returned shadow always has primitive type; the second element of the pair
// is the corresponding origin (nullptr when origin tracking is disabled).
std::pair<Value *, Value *> DFSanFunction::loadShadowOrigin(Value *Addr,
                                                            uint64_t Size,
                                                            Align InstAlignment,
                                                            Instruction *Pos) {
  const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();

  // Non-escaped loads.
  // Allocas with dedicated local shadow/origin slots are read directly.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto SI = AllocaShadowMap.find(AI);
    if (SI != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
      const auto OI = AllocaOriginMap.find(AI);
      assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());
      return {ShadowLI, ShouldTrackOrigins
                            ? IRB.CreateLoad(DFS.OriginTy, OI->second)
                            : nullptr};
    }
  }

  // Load from constant addresses.
  // If every underlying object is constant memory, the loaded bytes can carry
  // no taint, so the shadow is statically zero.
  SmallVector<const Value *, 2> Objs;
  getUnderlyingObjects(Addr, Objs);
  bool AllConstants = true;
  for (const Value *Obj : Objs) {
    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
      continue;
    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
      continue;

    AllConstants = false;
    break;
  }
  if (AllConstants)
    return {DFS.ZeroPrimitiveShadow,
            ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};

  // A zero-sized load reads no data and therefore has a zero shadow.
  if (Size == 0)
    return {DFS.ZeroPrimitiveShadow,
            ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};

  // Use callback to load if this is not an optimizable case for origin
  // tracking.
  if (ShouldTrackOrigins &&
      useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
    IRBuilder<> IRB(Pos);
    CallInst *Call =
        IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
                       {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        ConstantInt::get(DFS.IntptrTy, Size)});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    // The callback's return value packs the shadow in the bits above
    // OriginWidthBits and the origin in the low bits.
    return {IRB.CreateTrunc(IRB.CreateLShr(Call, DFS.OriginWidthBits),
                            DFS.PrimitiveShadowTy),
            IRB.CreateTrunc(Call, DFS.OriginTy)};
  }

  // Other cases that support loading shadows or origins in a fast way.
  Value *ShadowAddr, *OriginAddr;
  std::tie(ShadowAddr, OriginAddr) =
      DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);

  const Align ShadowAlign = getShadowAlign(InstAlignment);
  const Align OriginAlign = getOriginAlign(InstAlignment);
  Value *Origin = nullptr;
  if (ShouldTrackOrigins) {
    IRBuilder<> IRB(Pos);
    Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
  }

  // Small loads: read the one or two shadow slots directly.
  switch (Size) {
  case 1: {
    LoadInst *LI = new LoadInst(DFS.PrimitiveShadowTy, ShadowAddr, "", Pos);
    LI->setAlignment(ShadowAlign);
    return {LI, Origin};
  }
  case 2: {
    IRBuilder<> IRB(Pos);
    Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
                                       ConstantInt::get(DFS.IntptrTy, 1));
    Value *Load =
        IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr, ShadowAlign);
    Value *Load1 =
        IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr1, ShadowAlign);
    return {combineShadows(Load, Load1, Pos), Origin};
  }
  }

  // Loads covering whole 64-bit shadow words can use the inline fast paths.
  if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0)
    return loadFast16ShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
                                OriginAlign, Origin, Pos);

  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0)
    return {loadLegacyShadowFast(ShadowAddr, Size, ShadowAlign, Pos), Origin};

  // Fallback: let the runtime compute the union over the shadow range.
  IRBuilder<> IRB(Pos);
  FunctionCallee &UnionLoadFn =
      ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
  CallInst *FallbackCall = IRB.CreateCall(
      UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  return {FallbackCall, Origin};
}
2143 
2144 static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {
2145   switch (AO) {
2146   case AtomicOrdering::NotAtomic:
2147     return AtomicOrdering::NotAtomic;
2148   case AtomicOrdering::Unordered:
2149   case AtomicOrdering::Monotonic:
2150   case AtomicOrdering::Acquire:
2151     return AtomicOrdering::Acquire;
2152   case AtomicOrdering::Release:
2153   case AtomicOrdering::AcquireRelease:
2154     return AtomicOrdering::AcquireRelease;
2155   case AtomicOrdering::SequentiallyConsistent:
2156     return AtomicOrdering::SequentiallyConsistent;
2157   }
2158   llvm_unreachable("Unknown ordering");
2159 }
2160 
// Instruments a load: computes the shadow of the loaded bytes (optionally
// unioned with the pointer operand's shadow), records it as LI's shadow, and
// tracks origins and load-event callbacks as configured.
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
  auto &DL = LI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(LI.getType());
  // A zero-sized load reads no data, hence zero shadow and origin.
  if (Size == 0) {
    DFSF.setShadow(&LI, DFSF.DFS.getZeroShadow(&LI));
    DFSF.setOrigin(&LI, DFSF.DFS.ZeroOrigin);
    return;
  }

  // When an application load is atomic, increase atomic ordering between
  // atomic application loads and stores to ensure happen-before order; load
  // shadow data after application data; store zero shadow data before
  // application data. This ensure shadow loads return either labels of the
  // initial application data or zeros.
  if (LI.isAtomic())
    LI.setOrdering(addAcquireOrdering(LI.getOrdering()));

  // For atomic loads, insert the shadow load after the application load.
  Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI;
  std::vector<Value *> Shadows;
  std::vector<Value *> Origins;
  Value *PrimitiveShadow, *Origin;
  std::tie(PrimitiveShadow, Origin) =
      DFSF.loadShadowOrigin(LI.getPointerOperand(), Size, LI.getAlign(), Pos);
  const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
  if (ShouldTrackOrigins) {
    Shadows.push_back(PrimitiveShadow);
    Origins.push_back(Origin);
  }
  // Optionally taint the loaded value with the pointer's own label.
  if (ClCombinePointerLabelsOnLoad) {
    Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
    PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos);
    if (ShouldTrackOrigins) {
      Shadows.push_back(PtrShadow);
      Origins.push_back(DFSF.getOrigin(LI.getPointerOperand()));
    }
  }
  // Shadows not statically known to be zero participate in
  // -dfsan-debug-nonzero-labels checks.
  if (!DFSF.DFS.isZeroShadow(PrimitiveShadow))
    DFSF.NonZeroChecks.push_back(PrimitiveShadow);

  Value *Shadow =
      DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos);
  DFSF.setShadow(&LI, Shadow);

  if (ShouldTrackOrigins) {
    DFSF.setOrigin(&LI, DFSF.combineOrigins(Shadows, Origins, Pos));
  }

  // Notify the runtime of the load when event callbacks are enabled.
  if (ClEventCallbacks) {
    IRBuilder<> IRB(Pos);
    Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
    IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
  }
}
2214 
// Writes a zero shadow covering Size application bytes starting at Addr, as
// one wide integer store to the corresponding shadow address.
void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
                                             Align ShadowAlign,
                                             Instruction *Pos) {
  IRBuilder<> IRB(Pos);
  // A single integer wide enough to cover all Size shadow slots at once.
  IntegerType *ShadowTy =
      IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
  Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  Value *ExtShadowAddr =
      IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
  IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
  // Do not write origins for 0 shadows because we do not trace origins for
  // untainted sinks.
}
// Stores PrimitiveShadow into the shadow memory backing Size application
// bytes at Addr. Uses a per-alloca stack slot when available, a wide zero
// store for zero shadows, and otherwise a vectorized splat-fill.
void DFSanFunction::storePrimitiveShadow(Value *Addr, uint64_t Size,
                                         Align Alignment,
                                         Value *PrimitiveShadow,
                                         Instruction *Pos) {
  // Fast path: allocas whose uses are all direct loads/stores keep their
  // shadow in a dedicated alloca (see visitAllocaInst) instead of shadow
  // memory.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto I = AllocaShadowMap.find(AI);
    if (I != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      IRB.CreateStore(PrimitiveShadow, I->second);
      return;
    }
  }

  // Each application byte maps to ShadowWidthBytes of shadow, so shadow
  // alignment scales accordingly.
  const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
  if (DFS.isZeroShadow(PrimitiveShadow)) {
    storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
    return;
  }

  IRBuilder<> IRB(Pos);
  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  // Fill 128 bits of shadow per store while enough bytes remain.
  const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
  uint64_t Offset = 0;
  if (Size >= ShadowVecSize) {
    // Build a vector splat of the primitive shadow once, then store it
    // repeatedly.
    auto *ShadowVecTy =
        FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize);
    Value *ShadowVec = UndefValue::get(ShadowVecTy);
    for (unsigned I = 0; I != ShadowVecSize; ++I) {
      ShadowVec = IRB.CreateInsertElement(
          ShadowVec, PrimitiveShadow,
          ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I));
    }
    Value *ShadowVecAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
    do {
      Value *CurShadowVecAddr =
          IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
      IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
      Size -= ShadowVecSize;
      ++Offset;
    } while (Size >= ShadowVecSize);
    // Convert the vector-store offset back into element units for the
    // scalar tail below.
    Offset *= ShadowVecSize;
  }
  // Scalar tail: store the remaining (< ShadowVecSize) shadow elements one
  // at a time.
  while (Size > 0) {
    Value *CurShadowAddr =
        IRB.CreateConstGEP1_32(DFS.PrimitiveShadowTy, ShadowAddr, Offset);
    IRB.CreateAlignedStore(PrimitiveShadow, CurShadowAddr, ShadowAlign);
    --Size;
    ++Offset;
  }
}
2280 
2281 static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
2282   switch (AO) {
2283   case AtomicOrdering::NotAtomic:
2284     return AtomicOrdering::NotAtomic;
2285   case AtomicOrdering::Unordered:
2286   case AtomicOrdering::Monotonic:
2287   case AtomicOrdering::Release:
2288     return AtomicOrdering::Release;
2289   case AtomicOrdering::Acquire:
2290   case AtomicOrdering::AcquireRelease:
2291     return AtomicOrdering::AcquireRelease;
2292   case AtomicOrdering::SequentiallyConsistent:
2293     return AtomicOrdering::SequentiallyConsistent;
2294   }
2295   llvm_unreachable("Unknown ordering");
2296 }
2297 
// Instruments an application store: writes the stored value's shadow into
// shadow memory before the store. Atomic stores write a zero shadow (see
// comment below).
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
  auto &DL = SI.getModule()->getDataLayout();
  Value *Val = SI.getValueOperand();
  uint64_t Size = DL.getTypeStoreSize(Val->getType());
  // Zero-sized stores transfer no data; nothing to instrument.
  if (Size == 0)
    return;

  // When an application store is atomic, increase atomic ordering between
  // atomic application loads and stores to ensure happen-before order; load
  // shadow data after application data; store zero shadow data before
  // application data. This ensure shadow loads return either labels of the
  // initial application data or zeros.
  if (SI.isAtomic())
    SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

  const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);

  // Atomic stores conservatively clear the shadow (see comment above).
  Value *Shadow =
      SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
  Value *PrimitiveShadow;
  // Optionally fold the pointer operand's label into the stored label.
  if (ClCombinePointerLabelsOnStore) {
    Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
    PrimitiveShadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
  } else {
    PrimitiveShadow = DFSF.collapseToPrimitiveShadow(Shadow, &SI);
  }
  DFSF.storePrimitiveShadow(SI.getPointerOperand(), Size, Alignment,
                            PrimitiveShadow, &SI);
  // Optional user callback observing the stored label and address.
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&SI);
    Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
    IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8});
  }
}
2332 
2333 void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) {
2334   assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2335 
2336   Value *Val = I.getOperand(1);
2337   const auto &DL = I.getModule()->getDataLayout();
2338   uint64_t Size = DL.getTypeStoreSize(Val->getType());
2339   if (Size == 0)
2340     return;
2341 
2342   // Conservatively set data at stored addresses and return with zero shadow to
2343   // prevent shadow data races.
2344   IRBuilder<> IRB(&I);
2345   Value *Addr = I.getOperand(0);
2346   const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment);
2347   DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, &I);
2348   DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I));
2349   DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2350 }
2351 
// Atomic RMW: delegate to the shared conservative handling, then strengthen
// ordering (release side) to match the zero-shadow store discipline.
void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
  visitCASOrRMW(I.getAlign(), I);
  // TODO: The ordering change follows MSan. It is possible not to change
  // ordering because we always set and use 0 shadows.
  I.setOrdering(addReleaseOrdering(I.getOrdering()));
}
2358 
// Atomic compare-exchange: delegate to the shared conservative handling,
// then strengthen the success ordering (release side).
void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  visitCASOrRMW(I.getAlign(), I);
  // TODO: The ordering change follows MSan. It is possible not to change
  // ordering because we always set and use 0 shadows.
  I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
}
2365 
// Unary operators propagate the combined shadow/origin of their operands.
void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
  visitInstOperands(UO);
}
2369 
// Binary operators propagate the combined shadow/origin of their operands.
void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
  visitInstOperands(BO);
}
2373 
2374 void DFSanVisitor::visitCastInst(CastInst &CI) { visitInstOperands(CI); }
2375 
// Comparisons propagate operand shadows; optionally report the combined
// shadow to the user's compare callback.
void DFSanVisitor::visitCmpInst(CmpInst &CI) {
  visitInstOperands(CI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&CI);
    // visitInstOperands already attached the combined shadow to &CI.
    Value *CombinedShadow = DFSF.getShadow(&CI);
    IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
  }
}
2384 
// GEPs propagate the combined shadow/origin of their operands.
void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  visitInstOperands(GEPI);
}
2388 
// Vector element extraction propagates the combined operand shadow/origin.
void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
  visitInstOperands(I);
}
2392 
// Vector element insertion propagates the combined operand shadow/origin.
void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
  visitInstOperands(I);
}
2396 
// Vector shuffles propagate the combined operand shadow/origin.
void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
  visitInstOperands(I);
}
2400 
// extractvalue: when field-level tracking is on, extract the matching field
// from the aggregate's shadow; otherwise fall back to combining operand
// shadows.
void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
  if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
    visitInstOperands(I);
    return;
  }

  IRBuilder<> IRB(&I);
  Value *Agg = I.getAggregateOperand();
  Value *AggShadow = DFSF.getShadow(Agg);
  // Mirror the extractvalue on the shadow aggregate using the same indices.
  Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
  DFSF.setShadow(&I, ResShadow);
  // Origins are not field-granular; combine them from the operands.
  visitInstOperandOrigins(I);
}
2414 
// insertvalue: when field-level tracking is on, insert the inserted value's
// shadow into the aggregate's shadow at the same indices; otherwise fall
// back to combining operand shadows.
void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
  if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
    visitInstOperands(I);
    return;
  }

  IRBuilder<> IRB(&I);
  Value *AggShadow = DFSF.getShadow(I.getAggregateOperand());
  Value *InsShadow = DFSF.getShadow(I.getInsertedValueOperand());
  // Mirror the insertvalue on the shadow aggregate using the same indices.
  Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
  DFSF.setShadow(&I, Res);
  // Origins are not field-granular; combine them from the operands.
  visitInstOperandOrigins(I);
}
2428 
// Allocas: when every use is a direct load from, or store to, the alloca,
// keep its shadow (and origin) in dedicated stack slots instead of shadow
// memory — a fast path consumed by the load/store instrumentation.
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
  bool AllLoadsStores = true;
  for (User *U : I.users()) {
    if (isa<LoadInst>(U))
      continue;

    // A store qualifies only if the alloca is the address being stored to,
    // not the value being stored (which would let the pointer escape).
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getPointerOperand() == &I)
        continue;
    }

    AllLoadsStores = false;
    break;
  }
  if (AllLoadsStores) {
    IRBuilder<> IRB(&I);
    // One primitive-shadow slot per qualifying alloca.
    DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.PrimitiveShadowTy);
    if (DFSF.DFS.shouldTrackOrigins()) {
      DFSF.AllocaOriginMap[&I] =
          IRB.CreateAlloca(DFSF.DFS.OriginTy, nullptr, "_dfsa");
    }
  }
  // The alloca's own result (the pointer) is untainted.
  DFSF.setShadow(&I, DFSF.DFS.ZeroPrimitiveShadow);
  DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
}
2454 
// select: compute the result shadow from the selected arms (selecting
// between arm shadows when possible, unioning them for vector conditions),
// and optionally fold in the condition's shadow/origin.
void DFSanVisitor::visitSelectInst(SelectInst &I) {
  Value *CondShadow = DFSF.getShadow(I.getCondition());
  Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
  Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
  Value *ShadowSel = nullptr;
  const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
  std::vector<Value *> Shadows;
  std::vector<Value *> Origins;
  Value *TrueOrigin =
      ShouldTrackOrigins ? DFSF.getOrigin(I.getTrueValue()) : nullptr;
  Value *FalseOrigin =
      ShouldTrackOrigins ? DFSF.getOrigin(I.getFalseValue()) : nullptr;

  if (isa<VectorType>(I.getCondition()->getType())) {
    // Per-lane selects can't pick a single arm's shadow; conservatively
    // union both arms' shadows.
    ShadowSel = DFSF.combineShadowsThenConvert(I.getType(), TrueShadow,
                                               FalseShadow, &I);
    if (ShouldTrackOrigins) {
      Shadows.push_back(TrueShadow);
      Shadows.push_back(FalseShadow);
      Origins.push_back(TrueOrigin);
      Origins.push_back(FalseOrigin);
    }
  } else {
    if (TrueShadow == FalseShadow) {
      // Both arms carry the same shadow, so no select is needed.
      ShadowSel = TrueShadow;
      if (ShouldTrackOrigins) {
        Shadows.push_back(TrueShadow);
        Origins.push_back(TrueOrigin);
      }
    } else {
      // Select the arm's shadow (and origin) with the same condition the
      // application select uses.
      ShadowSel =
          SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
      if (ShouldTrackOrigins) {
        Shadows.push_back(ShadowSel);
        Origins.push_back(SelectInst::Create(I.getCondition(), TrueOrigin,
                                             FalseOrigin, "", &I));
      }
    }
  }
  // Optionally taint the result with the condition's label as well.
  DFSF.setShadow(&I, ClTrackSelectControlFlow
                         ? DFSF.combineShadowsThenConvert(
                               I.getType(), CondShadow, ShadowSel, &I)
                         : ShadowSel);
  if (ShouldTrackOrigins) {
    if (ClTrackSelectControlFlow) {
      Shadows.push_back(CondShadow);
      Origins.push_back(DFSF.getOrigin(I.getCondition()));
    }
    DFSF.setOrigin(&I, DFSF.combineOrigins(Shadows, Origins, &I));
  }
}
2506 
// memset: delegate to the runtime's set-label routine so the destination
// range's shadow (and origin) match the label of the value being written.
void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
  IRBuilder<> IRB(&I);
  Value *ValShadow = DFSF.getShadow(I.getValue());
  Value *ValOrigin = DFSF.DFS.shouldTrackOrigins()
                         ? DFSF.getOrigin(I.getValue())
                         : DFSF.DFS.ZeroOrigin;
  IRB.CreateCall(
      DFSF.DFS.DFSanSetLabelFn,
      {ValShadow, ValOrigin,
       IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*DFSF.DFS.Ctx)),
       IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}
2519 
// memcpy/memmove: mirror the transfer in shadow memory by calling the same
// intrinsic on the corresponding shadow addresses with a scaled length.
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);
  Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  // Shadow is ShadowWidthBytes per application byte, so scale the length.
  Value *LenShadow =
      IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
                                                    DFSF.DFS.ShadowWidthBytes));
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  // Reuse the original intrinsic callee so memcpy stays memcpy and memmove
  // stays memmove.
  auto *MTI = cast<MemTransferInst>(
      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
  if (ClPreserveAlignment) {
    MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
    MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
  } else {
    MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
    MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
  }
  // Optional user callback observing the destination shadow and app length.
  if (ClEventCallbacks) {
    IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
                   {RawDestShadow, I.getLength()});
  }
}
2545 
// return: publish the return value's shadow (and origin) to the caller —
// via TLS slots under the TLS ABI, or by packing value+shadow into the
// aggregate return under the Args ABI.
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      unsigned Size =
          getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
      if (Size <= RetvalTLSSize) {
        // If the size overflows, stores nothing. At callsite, oversized return
        // shadows are set to zero.
        IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB),
                               ShadowTLSAlignment);
      }
      if (DFSF.DFS.shouldTrackOrigins()) {
        Value *O = DFSF.getOrigin(RI.getReturnValue());
        IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
      }
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      // Args ABI: the function returns {value, shadow}; build that aggregate
      // and make it the operand of the return.
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}
2580 
// Appends the shadow arguments a custom (__dfsw_/__dfso_) wrapper expects:
// one primitive shadow per fixed parameter, a pointer into a stack array of
// shadows for varargs, and a pointer for the wrapper to write the return
// shadow into.
void DFSanVisitor::addShadowArguments(Function &F, CallBase &CB,
                                      std::vector<Value *> &Args,
                                      IRBuilder<> &IRB) {
  FunctionType *FT = F.getFunctionType();

  auto *I = CB.arg_begin();

  // Adds non-variable argument shadows.
  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
    Args.push_back(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB));

  // Adds variable argument shadows.
  if (FT->isVarArg()) {
    // Stack array sized to the number of variadic arguments at this call.
    auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
                                     CB.arg_size() - FT->getNumParams());
    // Placed in the entry block so it dominates the call.
    auto *LabelVAAlloca =
        new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
                       "labelva", &DFSF.F->getEntryBlock().front());

    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
      auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
      IRB.CreateStore(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB),
                      LabelVAPtr);
    }

    // Pass a pointer to the first element of the shadow array.
    Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
  }

  // Adds the return value shadow.
  if (!FT->getReturnType()->isVoidTy()) {
    // One slot per function, reused across call sites.
    if (!DFSF.LabelReturnAlloca) {
      DFSF.LabelReturnAlloca = new AllocaInst(
          DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
          "labelreturn", &DFSF.F->getEntryBlock().front());
    }
    Args.push_back(DFSF.LabelReturnAlloca);
  }
}
2619 
// Appends the origin arguments a custom (__dfso_) wrapper expects, mirroring
// addShadowArguments: one origin per fixed parameter, a pointer into a stack
// array for varargs, and a pointer for the return-value origin.
void DFSanVisitor::addOriginArguments(Function &F, CallBase &CB,
                                      std::vector<Value *> &Args,
                                      IRBuilder<> &IRB) {
  FunctionType *FT = F.getFunctionType();

  auto *I = CB.arg_begin();

  // Add non-variable argument origins.
  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
    Args.push_back(DFSF.getOrigin(*I));

  // Add variable argument origins.
  if (FT->isVarArg()) {
    // Stack array sized to the number of variadic arguments at this call.
    auto *OriginVATy =
        ArrayType::get(DFSF.DFS.OriginTy, CB.arg_size() - FT->getNumParams());
    // Placed in the entry block so it dominates the call.
    auto *OriginVAAlloca =
        new AllocaInst(OriginVATy, getDataLayout().getAllocaAddrSpace(),
                       "originva", &DFSF.F->getEntryBlock().front());

    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
      auto *OriginVAPtr = IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, N);
      IRB.CreateStore(DFSF.getOrigin(*I), OriginVAPtr);
    }

    // Pass a pointer to the first element of the origin array.
    Args.push_back(IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, 0));
  }

  // Add the return value origin.
  if (!FT->getReturnType()->isVoidTy()) {
    // One slot per function, reused across call sites.
    if (!DFSF.OriginReturnAlloca) {
      DFSF.OriginReturnAlloca = new AllocaInst(
          DFSF.DFS.OriginTy, getDataLayout().getAllocaAddrSpace(),
          "originreturn", &DFSF.F->getEntryBlock().front());
    }
    Args.push_back(DFSF.OriginReturnAlloca);
  }
}
2657 
// Handles a call to a function listed in the ABI list, dispatching on its
// wrapper kind. Returns true if the call was fully handled here, false if
// the caller should fall through to generic instrumentation.
bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
  IRBuilder<> IRB(&CB);
  switch (DFSF.DFS.getWrapperKind(&F)) {
  case DataFlowSanitizer::WK_Warning:
    // Call the original function, emit a runtime "unimplemented" warning,
    // and treat the result as untainted.
    CB.setCalledFunction(&F);
    IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                   IRB.CreateGlobalStringPtr(F.getName()));
    DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
    DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
    return true;
  case DataFlowSanitizer::WK_Discard:
    // Call the original function and drop all labels on the result.
    CB.setCalledFunction(&F);
    DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
    DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
    return true;
  case DataFlowSanitizer::WK_Functional:
    // Call the original function; its result's label is the union of its
    // operands' labels.
    CB.setCalledFunction(&F);
    visitInstOperands(CB);
    return true;
  case DataFlowSanitizer::WK_Custom:
    // Don't try to handle invokes of custom functions, it's too complicated.
    // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
    // wrapper.
    CallInst *CI = dyn_cast<CallInst>(&CB);
    if (!CI)
      return false;

    const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
    FunctionType *FT = F.getFunctionType();
    TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
    // __dfso_ wrappers additionally take origin arguments.
    std::string CustomFName = ShouldTrackOrigins ? "__dfso_" : "__dfsw_";
    CustomFName += F.getName();
    FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
        CustomFName, CustomFn.TransformedType);
    if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
      CustomFn->copyAttributesFrom(&F);

      // Custom functions returning non-void will write to the return label.
      if (!FT->getReturnType()->isVoidTy()) {
        CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                   DFSF.DFS.ReadOnlyNoneAttrs);
      }
    }

    std::vector<Value *> Args;

    // Adds non-variable arguments.
    auto *I = CB.arg_begin();
    for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {
      Type *T = (*I)->getType();
      FunctionType *ParamFT;
      // Function-pointer arguments are replaced by a trampoline plus the
      // original pointer cast to i8*, so the wrapper can forward labels.
      if (isa<PointerType>(T) &&
          (ParamFT = dyn_cast<FunctionType>(T->getPointerElementType()))) {
        std::string TName = "dfst";
        TName += utostr(FT->getNumParams() - N);
        TName += "$";
        TName += F.getName();
        Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
        Args.push_back(T);
        Args.push_back(
            IRB.CreateBitCast(*I, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
      } else {
        Args.push_back(*I);
      }
    }

    // Adds shadow arguments.
    const unsigned ShadowArgStart = Args.size();
    addShadowArguments(F, CB, Args, IRB);

    // Adds origin arguments.
    const unsigned OriginArgStart = Args.size();
    if (ShouldTrackOrigins)
      addOriginArguments(F, CB, Args, IRB);

    // Adds variable arguments.
    append_range(Args, drop_begin(CB.args(), FT->getNumParams()));

    CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
    CustomCI->setCallingConv(CI->getCallingConv());
    CustomCI->setAttributes(transformFunctionAttributes(
        CustomFn, CI->getContext(), CI->getAttributes()));

    // Update the parameter attributes of the custom call instruction to
    // zero extend the shadow parameters. This is required for targets
    // which consider PrimitiveShadowTy an illegal type.
    for (unsigned N = 0; N < FT->getNumParams(); N++) {
      const unsigned ArgNo = ShadowArgStart + N;
      if (CustomCI->getArgOperand(ArgNo)->getType() ==
          DFSF.DFS.PrimitiveShadowTy)
        CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
      if (ShouldTrackOrigins) {
        const unsigned OriginArgNo = OriginArgStart + N;
        if (CustomCI->getArgOperand(OriginArgNo)->getType() ==
            DFSF.DFS.OriginTy)
          CustomCI->addParamAttr(OriginArgNo, Attribute::ZExt);
      }
    }

    // Loads the return value shadow and origin.
    if (!FT->getReturnType()->isVoidTy()) {
      // The wrapper wrote these into the stack slots passed above.
      LoadInst *LabelLoad =
          IRB.CreateLoad(DFSF.DFS.PrimitiveShadowTy, DFSF.LabelReturnAlloca);
      DFSF.setShadow(CustomCI, DFSF.expandFromPrimitiveShadow(
                                   FT->getReturnType(), LabelLoad, &CB));
      if (ShouldTrackOrigins) {
        LoadInst *OriginLoad =
            IRB.CreateLoad(DFSF.DFS.OriginTy, DFSF.OriginReturnAlloca);
        DFSF.setOrigin(CustomCI, OriginLoad);
      }
    }

    // The wrapper call fully replaces the original call.
    CI->replaceAllUsesWith(CustomCI);
    CI->eraseFromParent();
    return true;
  }
  return false;
}
2776 
// Instruments a generic call/invoke: handles intrinsics and ABI-list
// functions specially, then passes argument shadows (and origins) to the
// callee and recovers the return shadow, according to the instrumented ABI.
void DFSanVisitor::visitCallBase(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  // Intrinsics and inline asm are not instrumented functions; just combine
  // operand shadows.
  if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
    visitInstOperands(CB);
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
    return;

  // ABI-listed functions get wrapper-specific handling; if that fully
  // handles the call we are done.
  DenseMap<Value *, Function *>::iterator UnwrappedFnIt =
      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
  if (UnwrappedFnIt != DFSF.DFS.UnwrappedFnMap.end())
    if (visitWrappedCallBase(*UnwrappedFnIt->second, CB))
      return;

  IRBuilder<> IRB(&CB);

  const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
  FunctionType *FT = CB.getFunctionType();
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    // Stores argument shadows.
    unsigned ArgOffset = 0;
    const DataLayout &DL = getDataLayout();
    for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
      if (ShouldTrackOrigins) {
        // Ignore overflowed origins
        Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
        if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
            !DFSF.DFS.isZeroShadow(ArgShadow))
          IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
                          DFSF.getArgOriginTLS(I, IRB));
      }

      unsigned Size =
          DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
      // Stop storing if arguments' size overflows. Inside a function, arguments
      // after overflow have zero shadow values.
      if (ArgOffset + Size > ArgTLSSize)
        break;
      IRB.CreateAlignedStore(
          DFSF.getShadow(CB.getArgOperand(I)),
          DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
          ShadowTLSAlignment);
      ArgOffset += alignTo(Size, ShadowTLSAlignment);
    }
  }

  // Find the instruction position right after the call, where the return
  // shadow must be read. Invokes need their normal destination (possibly a
  // freshly split edge) for that.
  Instruction *Next = nullptr;
  if (!CB.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        // Split the edge so the shadow load only runs when the invoke
        // returns normally.
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CB.getIterator() != CB.getParent()->end());
      Next = CB.getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      // Loads the return value shadow.
      IRBuilder<> NextIRB(Next);
      const DataLayout &DL = getDataLayout();
      unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
      if (Size > RetvalTLSSize) {
        // Set overflowed return shadow to be zero.
        DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
      } else {
        LoadInst *LI = NextIRB.CreateAlignedLoad(
            DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
            ShadowTLSAlignment, "_dfsret");
        // Don't re-instrument the shadow load itself.
        DFSF.SkipInsts.insert(LI);
        DFSF.setShadow(&CB, LI);
        DFSF.NonZeroChecks.push_back(LI);
      }

      if (ShouldTrackOrigins) {
        LoadInst *LI = NextIRB.CreateLoad(
            DFSF.DFS.OriginTy, DFSF.getRetvalOriginTLS(), "_dfsret_o");
        DFSF.SkipInsts.insert(LI);
        DFSF.setOrigin(&CB, LI);
      }
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    // Args ABI: rewrite the call to the shadow-augmented function type.
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));

    const unsigned NumParams = FT->getNumParams();

    // Copy original arguments.
    auto *ArgIt = CB.arg_begin(), *ArgEnd = CB.arg_end();
    std::vector<Value *> Args(NumParams);
    std::copy_n(ArgIt, NumParams, Args.begin());

    // Add shadow arguments by transforming original arguments.
    std::generate_n(std::back_inserter(Args), NumParams,
                    [&]() { return DFSF.getShadow(*ArgIt++); });

    if (FT->isVarArg()) {
      // Variadic shadows travel through a stack array in the entry block.
      unsigned VarArgSize = CB.arg_size() - NumParams;
      ArrayType *VarArgArrayTy =
          ArrayType::get(DFSF.DFS.PrimitiveShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));

      // Copy remaining var args.
      unsigned GepIndex = 0;
      std::for_each(ArgIt, ArgEnd, [&](Value *Arg) {
        IRB.CreateStore(
            DFSF.getShadow(Arg),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, GepIndex++));
        Args.push_back(Arg);
      });
    }

    CallBase *NewCB;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    } else {
      NewCB = IRB.CreateCall(NewFT, Func, Args);
    }
    NewCB->setCallingConv(CB.getCallingConv());
    // The new return type ({value, shadow}) may be incompatible with the
    // original return attributes; drop the incompatible ones.
    NewCB->setAttributes(CB.getAttributes().removeAttributes(
        *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
        AttributeFuncs::typeIncompatible(NewCB->getType())));

    if (Next) {
      // Unpack {value, shadow} from the aggregate return.
      ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
      DFSF.SkipInsts.insert(ExVal);
      ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
      DFSF.SkipInsts.insert(ExShadow);
      DFSF.setShadow(ExVal, ExShadow);
      DFSF.NonZeroChecks.push_back(ExShadow);

      CB.replaceAllUsesWith(ExVal);
    }

    CB.eraseFromParent();
  }
}
2931 
// PHI nodes: create a parallel shadow phi now with placeholder incoming
// values; the real incoming shadows are filled in later via PHIFixups, once
// all shadows exist.
void DFSanVisitor::visitPHINode(PHINode &PN) {
  Type *ShadowTy = DFSF.DFS.getShadowTy(&PN);
  PHINode *ShadowPN =
      PHINode::Create(ShadowTy, PN.getNumIncomingValues(), "", &PN);

  // Give the shadow phi node valid predecessors to fool SplitEdge into working.
  Value *UndefShadow = UndefValue::get(ShadowTy);
  for (BasicBlock *BB : PN.blocks())
    ShadowPN->addIncoming(UndefShadow, BB);

  DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
  DFSF.setShadow(&PN, ShadowPN);
}
2945 
2946 namespace {
2947 class DataFlowSanitizerLegacyPass : public ModulePass {
2948 private:
2949   std::vector<std::string> ABIListFiles;
2950 
2951 public:
2952   static char ID;
2953 
2954   DataFlowSanitizerLegacyPass(
2955       const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
2956       : ModulePass(ID), ABIListFiles(ABIListFiles) {}
2957 
2958   bool runOnModule(Module &M) override {
2959     return DataFlowSanitizer(ABIListFiles).runImpl(M);
2960   }
2961 };
2962 } // namespace
2963 
// Unique pass-identification token required by the legacy pass manager.
char DataFlowSanitizerLegacyPass::ID;

// Register the pass under the "dfsan" command-line name.
INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)
2968 
// Factory for the legacy pass-manager wrapper; ownership passes to the
// caller (the legacy pass manager deletes its passes).
ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
    const std::vector<std::string> &ABIListFiles) {
  return new DataFlowSanitizerLegacyPass(ABIListFiles);
}
2973 
2974 PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
2975                                              ModuleAnalysisManager &AM) {
2976   if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
2977     return PreservedAnalyses::none();
2978   }
2979   return PreservedAnalyses::all();
2980 }
2981