1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11 /// analysis.
12 ///
13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14 /// class of bugs on its own.  Instead, it provides a generic dynamic data flow
15 /// analysis framework to be used by clients to help detect application-specific
16 /// issues within their own code.
17 ///
18 /// The analysis is based on automatic propagation of data flow labels (also
19 /// known as taint labels) through a program as it performs computation.  Each
20 /// byte of application memory is backed by two bytes of shadow memory which
21 /// hold the label.  On Linux/x86_64, memory is laid out as follows:
22 ///
23 /// +--------------------+ 0x800000000000 (top of memory)
24 /// | application memory |
25 /// +--------------------+ 0x700000008000 (kAppAddr)
26 /// |                    |
27 /// |       unused       |
28 /// |                    |
29 /// +--------------------+ 0x300200000000 (kUnusedAddr)
30 /// |    union table     |
31 /// +--------------------+ 0x300000000000 (kUnionTableAddr)
32 /// |       origin       |
33 /// +--------------------+ 0x200000008000 (kOriginAddr)
34 /// |   shadow memory    |
35 /// +--------------------+ 0x000000010000 (kShadowAddr)
36 /// | reserved by kernel |
37 /// +--------------------+ 0x000000000000
38 ///
39 /// To derive a shadow memory address from an application memory address,
40 /// bits 44-46 are cleared to bring the address into the range
41 /// [0x000000008000,0x100000000000).  Then the address is shifted left by 1 to
42 /// account for the double byte representation of shadow labels and move the
43 /// address into the shadow memory range.  See the function
44 /// DataFlowSanitizer::getShadowAddress below.
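///
/// As an illustrative example (not taken from the design document): the
/// application address 0x700000008000 (kAppAddr) has bits 44-46 cleared,
/// giving 0x000000008000; shifting left by 1 then yields the shadow address
/// 0x000000010000 (kShadowAddr).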
45 ///
46 /// For more information, please refer to the design document:
47 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
48 //
49 //===----------------------------------------------------------------------===//
50 
51 #include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
52 #include "llvm/ADT/DenseMap.h"
53 #include "llvm/ADT/DenseSet.h"
54 #include "llvm/ADT/DepthFirstIterator.h"
55 #include "llvm/ADT/None.h"
56 #include "llvm/ADT/SmallPtrSet.h"
57 #include "llvm/ADT/SmallVector.h"
58 #include "llvm/ADT/StringExtras.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Triple.h"
61 #include "llvm/ADT/iterator.h"
62 #include "llvm/Analysis/ValueTracking.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/Constant.h"
67 #include "llvm/IR/Constants.h"
68 #include "llvm/IR/DataLayout.h"
69 #include "llvm/IR/DerivedTypes.h"
70 #include "llvm/IR/Dominators.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalAlias.h"
73 #include "llvm/IR/GlobalValue.h"
74 #include "llvm/IR/GlobalVariable.h"
75 #include "llvm/IR/IRBuilder.h"
76 #include "llvm/IR/InlineAsm.h"
77 #include "llvm/IR/InstVisitor.h"
78 #include "llvm/IR/InstrTypes.h"
79 #include "llvm/IR/Instruction.h"
80 #include "llvm/IR/Instructions.h"
81 #include "llvm/IR/IntrinsicInst.h"
82 #include "llvm/IR/LLVMContext.h"
83 #include "llvm/IR/MDBuilder.h"
84 #include "llvm/IR/Module.h"
85 #include "llvm/IR/PassManager.h"
86 #include "llvm/IR/Type.h"
87 #include "llvm/IR/User.h"
88 #include "llvm/IR/Value.h"
89 #include "llvm/InitializePasses.h"
90 #include "llvm/Pass.h"
91 #include "llvm/Support/Alignment.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CommandLine.h"
94 #include "llvm/Support/ErrorHandling.h"
95 #include "llvm/Support/SpecialCaseList.h"
96 #include "llvm/Support/VirtualFileSystem.h"
97 #include "llvm/Transforms/Instrumentation.h"
98 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
99 #include "llvm/Transforms/Utils/Local.h"
100 #include <algorithm>
101 #include <cassert>
102 #include <cstddef>
103 #include <cstdint>
104 #include <iterator>
105 #include <memory>
106 #include <set>
107 #include <string>
108 #include <utility>
109 #include <vector>
110 
111 using namespace llvm;
112 
113 // This must be consistent with ShadowWidthBits.
114 static const Align ShadowTLSAlignment = Align(2);
115 
116 static const Align MinOriginAlignment = Align(4);
117 
118 // The size of TLS variables. These constants must be kept in sync with the ones
119 // in dfsan.cpp.
120 static const unsigned ArgTLSSize = 800;
121 static const unsigned RetvalTLSSize = 800;
122 
123 // External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer,
125 // the runtime will set the external mask based on the VMA range.
126 const char DFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";
127 
128 // The -dfsan-preserve-alignment flag controls whether this pass assumes that
129 // alignment requirements provided by the input IR are correct.  For example,
130 // if the input IR contains a load with alignment 8, this flag will cause
131 // the shadow load to have alignment 16.  This flag is disabled by default as
132 // we have unfortunately encountered too much code (including Clang itself;
133 // see PR14291) which performs misaligned access.
134 static cl::opt<bool> ClPreserveAlignment(
135     "dfsan-preserve-alignment",
136     cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
137     cl::init(false));
138 
139 // The ABI list files control how shadow parameters are passed. The pass treats
140 // every function labelled "uninstrumented" in the ABI list file as conforming
141 // to the "native" (i.e. unsanitized) ABI.  Unless the ABI list contains
142 // additional annotations for those functions, a call to one of those functions
143 // will produce a warning message, as the labelling behaviour of the function is
144 // unknown.  The other supported annotations are "functional" and "discard",
145 // which are described below under DataFlowSanitizer::WrapperKind.
146 static cl::list<std::string> ClABIListFiles(
147     "dfsan-abilist",
148     cl::desc("File listing native ABI functions and how the pass treats them"),
149     cl::Hidden);
150 
151 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
152 // functions (see DataFlowSanitizer::InstrumentedABI below).
153 static cl::opt<bool>
154     ClArgsABI("dfsan-args-abi",
155               cl::desc("Use the argument ABI rather than the TLS ABI"),
156               cl::Hidden);
157 
158 // Controls whether the pass includes or ignores the labels of pointers in load
159 // instructions.
160 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
161     "dfsan-combine-pointer-labels-on-load",
162     cl::desc("Combine the label of the pointer with the label of the data when "
163              "loading from memory."),
164     cl::Hidden, cl::init(true));
165 
166 // Controls whether the pass includes or ignores the labels of pointers in
// store instructions.
168 static cl::opt<bool> ClCombinePointerLabelsOnStore(
169     "dfsan-combine-pointer-labels-on-store",
170     cl::desc("Combine the label of the pointer with the label of the data when "
171              "storing in memory."),
172     cl::Hidden, cl::init(false));
173 
174 static cl::opt<bool> ClDebugNonzeroLabels(
175     "dfsan-debug-nonzero-labels",
176     cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
177              "load or return with a nonzero label"),
178     cl::Hidden);
179 
180 // Experimental feature that inserts callbacks for certain data events.
181 // Currently callbacks are only inserted for loads, stores, memory transfers
182 // (i.e. memcpy and memmove), and comparisons.
183 //
184 // If this flag is set to true, the user must provide definitions for the
185 // following callback functions:
186 //   void __dfsan_load_callback(dfsan_label Label, void* addr);
187 //   void __dfsan_store_callback(dfsan_label Label, void* addr);
188 //   void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
189 //   void __dfsan_cmp_callback(dfsan_label CombinedLabel);
190 static cl::opt<bool> ClEventCallbacks(
191     "dfsan-event-callbacks",
192     cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
193     cl::Hidden, cl::init(false));
194 
195 // Use a distinct bit for each base label, enabling faster unions with less
196 // instrumentation.  Limits the max number of base labels to 16.
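//
// With a distinct bit per base label, the union of two labels reduces to a
// bitwise OR of their shadows, so no union-table lookup or runtime call is
// needed on the fast path.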
197 static cl::opt<bool> ClFast16Labels(
198     "dfsan-fast-16-labels",
199     cl::desc("Use more efficient instrumentation, limiting the number of "
200              "labels to 16."),
201     cl::Hidden, cl::init(false));
202 
203 // Controls whether the pass tracks the control flow of select instructions.
204 static cl::opt<bool> ClTrackSelectControlFlow(
205     "dfsan-track-select-control-flow",
206     cl::desc("Propagate labels from condition values of select instructions "
207              "to results."),
208     cl::Hidden, cl::init(true));
209 
210 // TODO: This default value follows MSan. DFSan may use a different value.
211 static cl::opt<int> ClInstrumentWithCallThreshold(
212     "dfsan-instrument-with-call-threshold",
213     cl::desc("If the function being instrumented requires more than "
214              "this number of origin stores, use callbacks instead of "
215              "inline checks (-1 means never use callbacks)."),
216     cl::Hidden, cl::init(3500));
217 
218 // Controls how to track origins.
219 // * 0: do not track origins.
220 // * 1: track origins at memory store operations.
221 // * 2: TODO: track origins at memory store operations and callsites.
222 static cl::opt<int> ClTrackOrigins("dfsan-track-origins",
223                                    cl::desc("Track origins of labels"),
224                                    cl::Hidden, cl::init(0));
225 
226 static StringRef getGlobalTypeString(const GlobalValue &G) {
  // The type of a GlobalValue is always a pointer, so look at its value type.
228   Type *GType = G.getValueType();
229   // For now we support excluding struct types only.
230   if (StructType *SGType = dyn_cast<StructType>(GType)) {
231     if (!SGType->isLiteral())
232       return SGType->getName();
233   }
234   return "<unknown type>";
235 }
236 
237 namespace {
238 
239 class DFSanABIList {
240   std::unique_ptr<SpecialCaseList> SCL;
241 
242 public:
243   DFSanABIList() = default;
244 
245   void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
246 
  /// Returns whether this function or its source file is listed in the given
  /// category.
249   bool isIn(const Function &F, StringRef Category) const {
250     return isIn(*F.getParent(), Category) ||
251            SCL->inSection("dataflow", "fun", F.getName(), Category);
252   }
253 
254   /// Returns whether this global alias is listed in the given category.
255   ///
256   /// If GA aliases a function, the alias's name is matched as a function name
257   /// would be.  Similarly, aliases of globals are matched like globals.
258   bool isIn(const GlobalAlias &GA, StringRef Category) const {
259     if (isIn(*GA.getParent(), Category))
260       return true;
261 
262     if (isa<FunctionType>(GA.getValueType()))
263       return SCL->inSection("dataflow", "fun", GA.getName(), Category);
264 
265     return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
266            SCL->inSection("dataflow", "type", getGlobalTypeString(GA),
267                           Category);
268   }
269 
270   /// Returns whether this module is listed in the given category.
271   bool isIn(const Module &M, StringRef Category) const {
272     return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
273   }
274 };
275 
276 /// TransformedFunction is used to express the result of transforming one
277 /// function type into another.  This struct is immutable.  It holds metadata
278 /// useful for updating calls of the old function to the new type.
279 struct TransformedFunction {
280   TransformedFunction(FunctionType *OriginalType, FunctionType *TransformedType,
281                       std::vector<unsigned> ArgumentIndexMapping)
282       : OriginalType(OriginalType), TransformedType(TransformedType),
283         ArgumentIndexMapping(ArgumentIndexMapping) {}
284 
285   // Disallow copies.
286   TransformedFunction(const TransformedFunction &) = delete;
287   TransformedFunction &operator=(const TransformedFunction &) = delete;
288 
289   // Allow moves.
290   TransformedFunction(TransformedFunction &&) = default;
291   TransformedFunction &operator=(TransformedFunction &&) = default;
292 
293   /// Type of the function before the transformation.
294   FunctionType *OriginalType;
295 
296   /// Type of the function after the transformation.
297   FunctionType *TransformedType;
298 
299   /// Transforming a function may change the position of arguments.  This
300   /// member records the mapping from each argument's old position to its new
301   /// position.  Argument positions are zero-indexed.  If the transformation
302   /// from F to F' made the first argument of F into the third argument of F',
303   /// then ArgumentIndexMapping[0] will equal 2.
304   std::vector<unsigned> ArgumentIndexMapping;
305 };
306 
307 /// Given function attributes from a call site for the original function,
308 /// return function attributes appropriate for a call to the transformed
309 /// function.
310 AttributeList
311 transformFunctionAttributes(const TransformedFunction &TransformedFunction,
312                             LLVMContext &Ctx, AttributeList CallSiteAttrs) {
313 
314   // Construct a vector of AttributeSet for each function argument.
315   std::vector<llvm::AttributeSet> ArgumentAttributes(
316       TransformedFunction.TransformedType->getNumParams());
317 
  // Copy attributes from the parameters of the original function to the
319   // transformed version.  'ArgumentIndexMapping' holds the mapping from
320   // old argument position to new.
321   for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();
322        I < IE; ++I) {
323     unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];
324     ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(I);
325   }
326 
327   // Copy annotations on varargs arguments.
328   for (unsigned I = TransformedFunction.OriginalType->getNumParams(),
329                 IE = CallSiteAttrs.getNumAttrSets();
330        I < IE; ++I) {
331     ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(I));
332   }
333 
334   return AttributeList::get(Ctx, CallSiteAttrs.getFnAttributes(),
335                             CallSiteAttrs.getRetAttributes(),
336                             llvm::makeArrayRef(ArgumentAttributes));
337 }
338 
339 class DataFlowSanitizer {
340   friend struct DFSanFunction;
341   friend class DFSanVisitor;
342 
343   enum {
344     ShadowWidthBits = 16,
345     ShadowWidthBytes = ShadowWidthBits / 8,
346     OriginWidthBits = 32,
347     OriginWidthBytes = OriginWidthBits / 8
348   };
349 
350   /// Which ABI should be used for instrumented functions?
351   enum InstrumentedABI {
352     /// Argument and return value labels are passed through additional
353     /// arguments and by modifying the return type.
354     IA_Args,
355 
356     /// Argument and return value labels are passed through TLS variables
357     /// __dfsan_arg_tls and __dfsan_retval_tls.
358     IA_TLS
359   };
360 
361   /// How should calls to uninstrumented functions be handled?
362   enum WrapperKind {
363     /// This function is present in an uninstrumented form but we don't know
364     /// how it should be handled.  Print a warning and call the function anyway.
365     /// Don't label the return value.
366     WK_Warning,
367 
368     /// This function does not write to (user-accessible) memory, and its return
369     /// value is unlabelled.
370     WK_Discard,
371 
372     /// This function does not write to (user-accessible) memory, and the label
373     /// of its return value is the union of the label of its arguments.
374     WK_Functional,
375 
376     /// Instead of calling the function, a custom wrapper __dfsw_F is called,
377     /// where F is the name of the function.  This function may wrap the
378     /// original function or provide its own implementation.  This is similar to
379     /// the IA_Args ABI, except that IA_Args uses a struct return type to
380     /// pass the return value shadow in a register, while WK_Custom uses an
381     /// extra pointer argument to return the shadow.  This allows the wrapped
382     /// form of the function type to be expressed in C.
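    ///
    /// As an illustrative sketch of this convention (not a definition): for
    ///   int f(int x);
    /// the custom wrapper would be declared in C roughly as
    ///   int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label);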
383     WK_Custom
384   };
385 
386   Module *Mod;
387   LLVMContext *Ctx;
388   Type *Int8Ptr;
389   IntegerType *OriginTy;
390   PointerType *OriginPtrTy;
391   ConstantInt *OriginBase;
392   ConstantInt *ZeroOrigin;
393   /// The shadow type for all primitive types and vector types.
394   IntegerType *PrimitiveShadowTy;
395   PointerType *PrimitiveShadowPtrTy;
396   IntegerType *IntptrTy;
397   ConstantInt *ZeroPrimitiveShadow;
398   ConstantInt *ShadowPtrMask;
399   ConstantInt *ShadowPtrMul;
400   Constant *ArgTLS;
401   ArrayType *ArgOriginTLSTy;
402   Constant *ArgOriginTLS;
403   Constant *RetvalTLS;
404   Constant *RetvalOriginTLS;
405   Constant *ExternalShadowMask;
406   FunctionType *DFSanUnionFnTy;
407   FunctionType *DFSanUnionLoadFnTy;
408   FunctionType *DFSanLoadLabelAndOriginFnTy;
409   FunctionType *DFSanUnimplementedFnTy;
410   FunctionType *DFSanSetLabelFnTy;
411   FunctionType *DFSanNonzeroLabelFnTy;
412   FunctionType *DFSanVarargWrapperFnTy;
413   FunctionType *DFSanCmpCallbackFnTy;
414   FunctionType *DFSanLoadStoreCallbackFnTy;
415   FunctionType *DFSanMemTransferCallbackFnTy;
416   FunctionType *DFSanChainOriginFnTy;
417   FunctionType *DFSanMemOriginTransferFnTy;
418   FunctionType *DFSanMaybeStoreOriginFnTy;
419   FunctionCallee DFSanUnionFn;
420   FunctionCallee DFSanCheckedUnionFn;
421   FunctionCallee DFSanUnionLoadFn;
422   FunctionCallee DFSanUnionLoadFast16LabelsFn;
423   FunctionCallee DFSanLoadLabelAndOriginFn;
424   FunctionCallee DFSanUnimplementedFn;
425   FunctionCallee DFSanSetLabelFn;
426   FunctionCallee DFSanNonzeroLabelFn;
427   FunctionCallee DFSanVarargWrapperFn;
428   FunctionCallee DFSanLoadCallbackFn;
429   FunctionCallee DFSanStoreCallbackFn;
430   FunctionCallee DFSanMemTransferCallbackFn;
431   FunctionCallee DFSanCmpCallbackFn;
432   FunctionCallee DFSanChainOriginFn;
433   FunctionCallee DFSanMemOriginTransferFn;
434   FunctionCallee DFSanMaybeStoreOriginFn;
435   SmallPtrSet<Value *, 16> DFSanRuntimeFunctions;
436   MDNode *ColdCallWeights;
437   MDNode *OriginStoreWeights;
438   DFSanABIList ABIList;
439   DenseMap<Value *, Function *> UnwrappedFnMap;
440   AttrBuilder ReadOnlyNoneAttrs;
441   bool DFSanRuntimeShadowMask = false;
442 
443   Value *getShadowOffset(Value *Addr, IRBuilder<> &IRB);
444   Value *getShadowAddress(Value *Addr, Instruction *Pos);
445   std::pair<Value *, Value *>
446   getShadowOriginAddress(Value *Addr, Align InstAlignment, Instruction *Pos);
447   bool isInstrumented(const Function *F);
448   bool isInstrumented(const GlobalAlias *GA);
449   FunctionType *getArgsFunctionType(FunctionType *T);
450   FunctionType *getTrampolineFunctionType(FunctionType *T);
451   TransformedFunction getCustomFunctionType(FunctionType *T);
452   InstrumentedABI getInstrumentedABI();
453   WrapperKind getWrapperKind(Function *F);
454   void addGlobalNamePrefix(GlobalValue *GV);
455   Function *buildWrapperFunction(Function *F, StringRef NewFName,
456                                  GlobalValue::LinkageTypes NewFLink,
457                                  FunctionType *NewFT);
458   Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
459   void initializeCallbackFunctions(Module &M);
460   void initializeRuntimeFunctions(Module &M);
461   void injectMetadataGlobals(Module &M);
462 
463   bool init(Module &M);
464 
  /// Returns whether the pass tracks origins. Supported only in fast16 mode
  /// with the TLS ABI.
467   bool shouldTrackOrigins();
468 
  /// Returns whether the pass tracks labels for struct fields and array
  /// indices. Supported only in fast16 mode with the TLS ABI.
471   bool shouldTrackFieldsAndIndices();
472 
473   /// Returns a zero constant with the shadow type of OrigTy.
474   ///
  /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2),...}
476   /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]
477   /// getZeroShadow(other type) = i16(0)
478   ///
479   /// Note that a zero shadow is always i16(0) when shouldTrackFieldsAndIndices
480   /// returns false.
481   Constant *getZeroShadow(Type *OrigTy);
482   /// Returns a zero constant with the shadow type of V's type.
483   Constant *getZeroShadow(Value *V);
484 
485   /// Checks if V is a zero shadow.
486   bool isZeroShadow(Value *V);
487 
488   /// Returns the shadow type of OrigTy.
489   ///
490   /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}
491   /// getShadowTy([n x T]) = [n x getShadowTy(T)]
492   /// getShadowTy(other type) = i16
493   ///
494   /// Note that a shadow type is always i16 when shouldTrackFieldsAndIndices
495   /// returns false.
496   Type *getShadowTy(Type *OrigTy);
  /// Returns the shadow type of V's type.
498   Type *getShadowTy(Value *V);
499 
500   const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;
501 
502 public:
503   DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);
504 
505   bool runImpl(Module &M);
506 };
507 
508 struct DFSanFunction {
509   DataFlowSanitizer &DFS;
510   Function *F;
511   DominatorTree DT;
512   DataFlowSanitizer::InstrumentedABI IA;
513   bool IsNativeABI;
514   AllocaInst *LabelReturnAlloca = nullptr;
515   AllocaInst *OriginReturnAlloca = nullptr;
516   DenseMap<Value *, Value *> ValShadowMap;
517   DenseMap<Value *, Value *> ValOriginMap;
518   DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
519   DenseMap<AllocaInst *, AllocaInst *> AllocaOriginMap;
520 
521   struct PHIFixupElement {
522     PHINode *Phi;
523     PHINode *ShadowPhi;
524     PHINode *OriginPhi;
525   };
526   std::vector<PHIFixupElement> PHIFixups;
527 
528   DenseSet<Instruction *> SkipInsts;
529   std::vector<Value *> NonZeroChecks;
530   bool AvoidNewBlocks;
531 
532   struct CachedShadow {
533     BasicBlock *Block; // The block where Shadow is defined.
534     Value *Shadow;
535   };
  /// Maps a value to its latest shadow value in terms of the dominator tree.
537   DenseMap<std::pair<Value *, Value *>, CachedShadow> CachedShadows;
  /// Maps a value to the latest collapsed shadow value it was converted to, in
  /// terms of the dominator tree. When ClDebugNonzeroLabels is on, this cache
  /// is used in a post-processing step where CFG blocks are split, so it does
  /// not cache the defining BasicBlock like CachedShadows; instead it relies
  /// on domination between values.
542   DenseMap<Value *, Value *> CachedCollapsedShadows;
543   DenseMap<Value *, std::set<Value *>> ShadowElements;
544 
545   DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
546       : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
547     DT.recalculate(*F);
548     // FIXME: Need to track down the register allocator issue which causes poor
549     // performance in pathological cases with large numbers of basic blocks.
550     AvoidNewBlocks = F->size() > 1000;
551   }
552 
553   /// Computes the shadow address for a given function argument.
554   ///
555   /// Shadow = ArgTLS+ArgOffset.
556   Value *getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB);
557 
558   /// Computes the shadow address for a return value.
559   Value *getRetvalTLS(Type *T, IRBuilder<> &IRB);
560 
561   /// Computes the origin address for a given function argument.
562   ///
563   /// Origin = ArgOriginTLS[ArgNo].
564   Value *getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB);
565 
566   /// Computes the origin address for a return value.
567   Value *getRetvalOriginTLS();
568 
569   Value *getOrigin(Value *V);
570   void setOrigin(Instruction *I, Value *Origin);
571   /// Generates IR to compute the origin of the last operand with a taint label.
572   Value *combineOperandOrigins(Instruction *Inst);
  /// Before the instruction Pos, generates IR to select the origin
  /// corresponding to the last shadow with a taint label. Labels and origins
  /// come from the vectors Shadows and Origins, respectively. The generated IR
  /// is like
  ///   Sn-1 != Zero ? On-1: ... S2 != Zero ? O2: S1 != Zero ? O1: O0
  /// When Zero is nullptr, ZeroPrimitiveShadow is used; otherwise Zero may be
  /// a zero constant of another bit width.
579   Value *combineOrigins(const std::vector<Value *> &Shadows,
580                         const std::vector<Value *> &Origins, Instruction *Pos,
581                         ConstantInt *Zero = nullptr);
582 
583   Value *getShadow(Value *V);
584   void setShadow(Instruction *I, Value *Shadow);
585   /// Generates IR to compute the union of the two given shadows, inserting it
586   /// before Pos. The combined value is with primitive type.
587   Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
588   /// Combines the shadow values of V1 and V2, then converts the combined value
589   /// with primitive type into a shadow value with the original type T.
590   Value *combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
591                                    Instruction *Pos);
592   Value *combineOperandShadows(Instruction *Inst);
593   std::pair<Value *, Value *> loadShadowOrigin(Value *ShadowAddr, uint64_t Size,
594                                                Align InstAlignment,
595                                                Instruction *Pos);
596   void storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
597                                   Align InstAlignment, Value *PrimitiveShadow,
598                                   Value *Origin, Instruction *Pos);
599   /// Applies PrimitiveShadow to all primitive subtypes of T, returning
600   /// the expanded shadow value.
601   ///
602   /// EFP({T1,T2, ...}, PS) = {EFP(T1,PS),EFP(T2,PS),...}
603   /// EFP([n x T], PS) = [n x EFP(T,PS)]
604   /// EFP(other types, PS) = PS
605   Value *expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
606                                    Instruction *Pos);
607   /// Collapses Shadow into a single primitive shadow value, unioning all
608   /// primitive shadow values in the process. Returns the final primitive
609   /// shadow value.
610   ///
  /// CTP({V1,V2,...}) = UNION(CTP(V1),CTP(V2),...)
  /// CTP([V1,V2,...]) = UNION(CTP(V1),CTP(V2),...)
  /// CTP(other type V) = V
614   Value *collapseToPrimitiveShadow(Value *Shadow, Instruction *Pos);
615 
616   void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
617                                 Instruction *Pos);
618 
619   Align getShadowAlign(Align InstAlignment);
620 
621 private:
622   /// Collapses the shadow with aggregate type into a single primitive shadow
623   /// value.
624   template <class AggregateType>
625   Value *collapseAggregateShadow(AggregateType *AT, Value *Shadow,
626                                  IRBuilder<> &IRB);
627 
628   Value *collapseToPrimitiveShadow(Value *Shadow, IRBuilder<> &IRB);
629 
630   /// Returns the shadow value of an argument A.
631   Value *getShadowForTLSArgument(Argument *A);
632 
633   /// The fast path of loading shadow in legacy mode.
634   Value *loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
635                               Align ShadowAlign, Instruction *Pos);
636 
637   /// The fast path of loading shadow in fast-16-label mode.
638   std::pair<Value *, Value *>
639   loadFast16ShadowFast(Value *ShadowAddr, Value *OriginAddr, uint64_t Size,
640                        Align ShadowAlign, Align OriginAlign, Value *FirstOrigin,
641                        Instruction *Pos);
642 
643   Align getOriginAlign(Align InstAlignment);
644 
  /// Because 4 contiguous bytes share one 4-byte origin, the most accurate
  /// load is __dfsan_load_label_and_origin, which returns the union of all
  /// labels and the origin of the first taint label. However, this is an
  /// additional call with many instructions. To keep common cases fast, this
  /// checks whether it is possible to load labels and origins without using
  /// the callback function.
651   bool useCallbackLoadLabelAndOrigin(uint64_t Size, Align InstAlignment);
652 
653   /// Returns a chain at the current stack with previous origin V.
654   Value *updateOrigin(Value *V, IRBuilder<> &IRB);
655 
656   /// Creates an Intptr = Origin | Origin << 32 if Intptr's size is 64. Returns
657   /// Origin otherwise.
658   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin);
659 
660   /// Stores Origin into the address range [StoreOriginAddr, StoreOriginAddr +
661   /// Size).
662   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *StoreOriginAddr,
663                    uint64_t StoreOriginSize, Align Alignment);
664 
665   /// Stores Origin in terms of its Shadow value.
666   /// * Do not write origins for zero shadows because we do not trace origins
667   ///   for untainted sinks.
668   /// * Use __dfsan_maybe_store_origin if there are too many origin store
669   ///   instrumentations.
670   void storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size, Value *Shadow,
671                    Value *Origin, Value *StoreOriginAddr, Align InstAlignment);
672 
673   /// Convert a scalar value to an i1 by comparing with 0.
674   Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &Name = "");
675 
676   bool shouldInstrumentWithCall();
677 
678   int NumOriginStores = 0;
679 };
680 
681 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
682 public:
683   DFSanFunction &DFSF;
684 
685   DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
686 
687   const DataLayout &getDataLayout() const {
688     return DFSF.F->getParent()->getDataLayout();
689   }
690 
691   // Combines shadow values and origins for all of I's operands.
692   void visitInstOperands(Instruction &I);
693 
694   void visitUnaryOperator(UnaryOperator &UO);
695   void visitBinaryOperator(BinaryOperator &BO);
696   void visitCastInst(CastInst &CI);
697   void visitCmpInst(CmpInst &CI);
698   void visitGetElementPtrInst(GetElementPtrInst &GEPI);
699   void visitLoadInst(LoadInst &LI);
700   void visitStoreInst(StoreInst &SI);
701   void visitAtomicRMWInst(AtomicRMWInst &I);
702   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
703   void visitReturnInst(ReturnInst &RI);
704   void visitCallBase(CallBase &CB);
705   void visitPHINode(PHINode &PN);
706   void visitExtractElementInst(ExtractElementInst &I);
707   void visitInsertElementInst(InsertElementInst &I);
708   void visitShuffleVectorInst(ShuffleVectorInst &I);
709   void visitExtractValueInst(ExtractValueInst &I);
710   void visitInsertValueInst(InsertValueInst &I);
711   void visitAllocaInst(AllocaInst &I);
712   void visitSelectInst(SelectInst &I);
713   void visitMemSetInst(MemSetInst &I);
714   void visitMemTransferInst(MemTransferInst &I);
715 
716 private:
717   void visitCASOrRMW(Align InstAlignment, Instruction &I);
718 
719   // Returns false when this is an invoke of a custom function.
720   bool visitWrappedCallBase(Function &F, CallBase &CB);
721 
722   // Combines origins for all of I's operands.
723   void visitInstOperandOrigins(Instruction &I);
724 
725   void addShadowArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
726                           IRBuilder<> &IRB);
727 
728   void addOriginArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
729                           IRBuilder<> &IRB);
730 };
731 
732 } // end anonymous namespace
733 
734 DataFlowSanitizer::DataFlowSanitizer(
735     const std::vector<std::string> &ABIListFiles) {
736   std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
737   llvm::append_range(AllABIListFiles, ClABIListFiles);
738   // FIXME: should we propagate vfs::FileSystem to this constructor?
739   ABIList.set(
740       SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem()));
741 }
742 
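// Builds the IA_Args form of a function type: a shadow parameter is appended
// for each formal parameter (plus a pointer to the vararg shadows for
// variadic functions), and a non-void return type R becomes the pair
// {R, shadow}. An illustrative sketch:
//   i32 (i8*, i32)  -->  { i32, i16 } (i8*, i32, i16, i16)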
743 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
744   SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
745   ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
746   if (T->isVarArg())
747     ArgTypes.push_back(PrimitiveShadowPtrTy);
748   Type *RetType = T->getReturnType();
749   if (!RetType->isVoidTy())
750     RetType = StructType::get(RetType, PrimitiveShadowTy);
751   return FunctionType::get(RetType, ArgTypes, T->isVarArg());
752 }
753 
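// Builds the type of a trampoline used to call an uninstrumented function
// pointer from instrumented code. The first parameter is a pointer to the
// original function, followed by the original parameters, their shadows, a
// pointer through which the return-value shadow is written back (if the
// return type is non-void), and, when origin tracking is enabled, the
// corresponding origin parameters. An illustrative sketch for i32 (i32),
// without origin tracking:
//   i32 (i32 (i32)*, i32, i16, i16*)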
754 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
755   assert(!T->isVarArg());
756   SmallVector<Type *, 4> ArgTypes;
757   ArgTypes.push_back(T->getPointerTo());
758   ArgTypes.append(T->param_begin(), T->param_end());
759   ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
760   Type *RetType = T->getReturnType();
761   if (!RetType->isVoidTy())
762     ArgTypes.push_back(PrimitiveShadowPtrTy);
763 
764   if (shouldTrackOrigins()) {
765     ArgTypes.append(T->getNumParams(), OriginTy);
766     if (!RetType->isVoidTy())
767       ArgTypes.push_back(OriginPtrTy);
768   }
769 
770   return FunctionType::get(T->getReturnType(), ArgTypes, false);
771 }
772 
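// Builds the type of a custom wrapper (__dfsw_*) for a function of type T.
// An illustrative sketch, without origin tracking: for
//   i32 (i32, float)
// the wrapper type becomes
//   i32 (i32, float, i16, i16, i16*)
// i.e. the original parameters, one shadow per parameter, and a pointer
// through which the return-value shadow is written back. Function-pointer
// parameters are additionally expanded into a pointer to a trampoline of the
// corresponding type plus an extra i8* argument, as reflected in
// ArgumentIndexMapping.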
773 TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
774   SmallVector<Type *, 4> ArgTypes;
775 
776   // Some parameters of the custom function being constructed are
777   // parameters of T.  Record the mapping from parameters of T to
778   // parameters of the custom function, so that parameter attributes
779   // at call sites can be updated.
780   std::vector<unsigned> ArgumentIndexMapping;
781   for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
782     Type *ParamType = T->getParamType(I);
783     FunctionType *FT;
784     if (isa<PointerType>(ParamType) &&
785         (FT = dyn_cast<FunctionType>(ParamType->getPointerElementType()))) {
786       ArgumentIndexMapping.push_back(ArgTypes.size());
787       ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
788       ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
789     } else {
790       ArgumentIndexMapping.push_back(ArgTypes.size());
791       ArgTypes.push_back(ParamType);
792     }
793   }
794   for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
795     ArgTypes.push_back(PrimitiveShadowTy);
796   if (T->isVarArg())
797     ArgTypes.push_back(PrimitiveShadowPtrTy);
798   Type *RetType = T->getReturnType();
799   if (!RetType->isVoidTy())
800     ArgTypes.push_back(PrimitiveShadowPtrTy);
801 
802   if (shouldTrackOrigins()) {
803     for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
804       ArgTypes.push_back(OriginTy);
805     if (T->isVarArg())
806       ArgTypes.push_back(OriginPtrTy);
807     if (!RetType->isVoidTy())
808       ArgTypes.push_back(OriginPtrTy);
809   }
810 
811   return TransformedFunction(
812       T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
813       ArgumentIndexMapping);
814 }
815 
816 bool DataFlowSanitizer::isZeroShadow(Value *V) {
817   if (!shouldTrackFieldsAndIndices())
818     return ZeroPrimitiveShadow == V;
819 
820   Type *T = V->getType();
821   if (!isa<ArrayType>(T) && !isa<StructType>(T)) {
822     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
823       return CI->isZero();
824     return false;
825   }
826 
827   return isa<ConstantAggregateZero>(V);
828 }
829 
830 bool DataFlowSanitizer::shouldTrackOrigins() {
831   static const bool ShouldTrackOrigins =
832       ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
833       ClFast16Labels;
834   return ShouldTrackOrigins;
835 }
836 
837 bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
838   return getInstrumentedABI() == DataFlowSanitizer::IA_TLS && ClFast16Labels;
839 }
840 
841 Constant *DataFlowSanitizer::getZeroShadow(Type *OrigTy) {
842   if (!shouldTrackFieldsAndIndices())
843     return ZeroPrimitiveShadow;
844 
845   if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))
846     return ZeroPrimitiveShadow;
847   Type *ShadowTy = getShadowTy(OrigTy);
848   return ConstantAggregateZero::get(ShadowTy);
849 }
850 
851 Constant *DataFlowSanitizer::getZeroShadow(Value *V) {
852   return getZeroShadow(V->getType());
853 }
854 
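// Recursively builds an aggregate shadow value whose primitive leaves are
// all PrimitiveShadow, by walking SubShadowTy and emitting insertvalue
// instructions. For example (illustrative), for the shadow type
// { i16, [2 x i16] } and a primitive shadow %ps, the result is equivalent
// to { %ps, [ %ps, %ps ] }.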
855 static Value *expandFromPrimitiveShadowRecursive(
856     Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubShadowTy,
857     Value *PrimitiveShadow, IRBuilder<> &IRB) {
858   if (!isa<ArrayType>(SubShadowTy) && !isa<StructType>(SubShadowTy))
859     return IRB.CreateInsertValue(Shadow, PrimitiveShadow, Indices);
860 
861   if (ArrayType *AT = dyn_cast<ArrayType>(SubShadowTy)) {
862     for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {
863       Indices.push_back(Idx);
864       Shadow = expandFromPrimitiveShadowRecursive(
865           Shadow, Indices, AT->getElementType(), PrimitiveShadow, IRB);
866       Indices.pop_back();
867     }
868     return Shadow;
869   }
870 
871   if (StructType *ST = dyn_cast<StructType>(SubShadowTy)) {
872     for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {
873       Indices.push_back(Idx);
874       Shadow = expandFromPrimitiveShadowRecursive(
875           Shadow, Indices, ST->getElementType(Idx), PrimitiveShadow, IRB);
876       Indices.pop_back();
877     }
878     return Shadow;
879   }
880   llvm_unreachable("Unexpected shadow type");
881 }
882 
883 bool DFSanFunction::shouldInstrumentWithCall() {
884   return ClInstrumentWithCallThreshold >= 0 &&
885          NumOriginStores >= ClInstrumentWithCallThreshold;
886 }
887 
888 Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
889                                                 Instruction *Pos) {
890   Type *ShadowTy = DFS.getShadowTy(T);
891 
892   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
893     return PrimitiveShadow;
894 
895   if (DFS.isZeroShadow(PrimitiveShadow))
896     return DFS.getZeroShadow(ShadowTy);
897 
898   IRBuilder<> IRB(Pos);
899   SmallVector<unsigned, 4> Indices;
900   Value *Shadow = UndefValue::get(ShadowTy);
901   Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
902                                               PrimitiveShadow, IRB);
903 
904   // Caches the primitive shadow value that built the shadow value.
905   CachedCollapsedShadows[Shadow] = PrimitiveShadow;
906   return Shadow;
907 }
908 
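// Folds an aggregate shadow into a single primitive shadow by OR-ing
// together the collapsed shadows of all elements; an empty aggregate
// collapses to the zero primitive shadow.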
909 template <class AggregateType>
910 Value *DFSanFunction::collapseAggregateShadow(AggregateType *AT, Value *Shadow,
911                                               IRBuilder<> &IRB) {
912   if (!AT->getNumElements())
913     return DFS.ZeroPrimitiveShadow;
914 
915   Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
916   Value *Aggregator = collapseToPrimitiveShadow(FirstItem, IRB);
917 
918   for (unsigned Idx = 1; Idx < AT->getNumElements(); Idx++) {
919     Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
920     Value *ShadowInner = collapseToPrimitiveShadow(ShadowItem, IRB);
921     Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
922   }
923   return Aggregator;
924 }
925 
926 Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
927                                                 IRBuilder<> &IRB) {
928   Type *ShadowTy = Shadow->getType();
929   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
930     return Shadow;
931   if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy))
932     return collapseAggregateShadow<>(AT, Shadow, IRB);
933   if (StructType *ST = dyn_cast<StructType>(ShadowTy))
934     return collapseAggregateShadow<>(ST, Shadow, IRB);
935   llvm_unreachable("Unexpected shadow type");
936 }
937 
938 Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
939                                                 Instruction *Pos) {
940   Type *ShadowTy = Shadow->getType();
941   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
942     return Shadow;
943 
944   assert(DFS.shouldTrackFieldsAndIndices());
945 
946   // Checks if the cached collapsed shadow value dominates Pos.
947   Value *&CS = CachedCollapsedShadows[Shadow];
948   if (CS && DT.dominates(CS, Pos))
949     return CS;
950 
951   IRBuilder<> IRB(Pos);
952   Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
953   // Caches the converted primitive shadow value.
954   CS = PrimitiveShadow;
955   return PrimitiveShadow;
956 }
957 
958 Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
959   if (!shouldTrackFieldsAndIndices())
960     return PrimitiveShadowTy;
961 
962   if (!OrigTy->isSized())
963     return PrimitiveShadowTy;
964   if (isa<IntegerType>(OrigTy))
965     return PrimitiveShadowTy;
966   if (isa<VectorType>(OrigTy))
967     return PrimitiveShadowTy;
968   if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy))
969     return ArrayType::get(getShadowTy(AT->getElementType()),
970                           AT->getNumElements());
971   if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
972     SmallVector<Type *, 4> Elements;
973     for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)
974       Elements.push_back(getShadowTy(ST->getElementType(I)));
975     return StructType::get(*Ctx, Elements);
976   }
977   return PrimitiveShadowTy;
978 }
979 
980 Type *DataFlowSanitizer::getShadowTy(Value *V) {
981   return getShadowTy(V->getType());
982 }
983 
984 bool DataFlowSanitizer::init(Module &M) {
985   Triple TargetTriple(M.getTargetTriple());
986   const DataLayout &DL = M.getDataLayout();
987 
988   Mod = &M;
989   Ctx = &M.getContext();
990   Int8Ptr = Type::getInt8PtrTy(*Ctx);
991   OriginTy = IntegerType::get(*Ctx, OriginWidthBits);
992   OriginPtrTy = PointerType::getUnqual(OriginTy);
993   PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
994   PrimitiveShadowPtrTy = PointerType::getUnqual(PrimitiveShadowTy);
995   IntptrTy = DL.getIntPtrType(*Ctx);
996   ZeroPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, 0);
997   ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
998   OriginBase = ConstantInt::get(IntptrTy, 0x200000000000LL);
999   ZeroOrigin = ConstantInt::getSigned(OriginTy, 0);
1000 
1001   switch (TargetTriple.getArch()) {
1002   case Triple::x86_64:
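    // ~0x700000000000 clears bits 44-46; see the memory layout diagram at
    // the top of this file.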
1003     ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
1004     break;
1005   case Triple::mips64:
1006   case Triple::mips64el:
1007     ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
1008     break;
1009   case Triple::aarch64:
1010   case Triple::aarch64_be:
1011     // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
1012     DFSanRuntimeShadowMask = true;
1013     break;
1014   default:
1015     report_fatal_error("unsupported triple");
1016   }
1017 
1018   Type *DFSanUnionArgs[2] = {PrimitiveShadowTy, PrimitiveShadowTy};
1019   DFSanUnionFnTy =
1020       FunctionType::get(PrimitiveShadowTy, DFSanUnionArgs, /*isVarArg=*/false);
1021   Type *DFSanUnionLoadArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
1022   DFSanUnionLoadFnTy = FunctionType::get(PrimitiveShadowTy, DFSanUnionLoadArgs,
1023                                          /*isVarArg=*/false);
1024   Type *DFSanLoadLabelAndOriginArgs[2] = {Int8Ptr, IntptrTy};
1025   DFSanLoadLabelAndOriginFnTy =
1026       FunctionType::get(IntegerType::get(*Ctx, 64), DFSanLoadLabelAndOriginArgs,
1027                         /*isVarArg=*/false);
1028   DFSanUnimplementedFnTy = FunctionType::get(
1029       Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
1030   Type *DFSanSetLabelArgs[4] = {PrimitiveShadowTy, OriginTy,
1031                                 Type::getInt8PtrTy(*Ctx), IntptrTy};
1032   DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
1033                                         DFSanSetLabelArgs, /*isVarArg=*/false);
1034   DFSanNonzeroLabelFnTy =
1035       FunctionType::get(Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
1036   DFSanVarargWrapperFnTy = FunctionType::get(
1037       Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
1038   DFSanCmpCallbackFnTy =
1039       FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
1040                         /*isVarArg=*/false);
1041   DFSanChainOriginFnTy =
1042       FunctionType::get(OriginTy, OriginTy, /*isVarArg=*/false);
1043   Type *DFSanMaybeStoreOriginArgs[4] = {IntegerType::get(*Ctx, ShadowWidthBits),
1044                                         Int8Ptr, IntptrTy, OriginTy};
1045   DFSanMaybeStoreOriginFnTy = FunctionType::get(
1046       Type::getVoidTy(*Ctx), DFSanMaybeStoreOriginArgs, /*isVarArg=*/false);
1047   Type *DFSanMemOriginTransferArgs[3] = {Int8Ptr, Int8Ptr, IntptrTy};
1048   DFSanMemOriginTransferFnTy = FunctionType::get(
1049       Type::getVoidTy(*Ctx), DFSanMemOriginTransferArgs, /*isVarArg=*/false);
1050   Type *DFSanLoadStoreCallbackArgs[2] = {PrimitiveShadowTy, Int8Ptr};
1051   DFSanLoadStoreCallbackFnTy =
1052       FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
1053                         /*isVarArg=*/false);
1054   Type *DFSanMemTransferCallbackArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
1055   DFSanMemTransferCallbackFnTy =
1056       FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
1057                         /*isVarArg=*/false);
1058 
1059   ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
1060   OriginStoreWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
1061   return true;
1062 }
1063 
1064 bool DataFlowSanitizer::isInstrumented(const Function *F) {
1065   return !ABIList.isIn(*F, "uninstrumented");
1066 }
1067 
1068 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
1069   return !ABIList.isIn(*GA, "uninstrumented");
1070 }
1071 
1072 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
1073   return ClArgsABI ? IA_Args : IA_TLS;
1074 }
1075 
1076 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
1077   if (ABIList.isIn(*F, "functional"))
1078     return WK_Functional;
1079   if (ABIList.isIn(*F, "discard"))
1080     return WK_Discard;
1081   if (ABIList.isIn(*F, "custom"))
1082     return WK_Custom;
1083 
1084   return WK_Warning;
1085 }
1086 
1087 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
1088   std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
1089   GV->setName(Prefix + GVName);
1090 
1091   // Try to change the name of the function in module inline asm.  We only do
1092   // this for specific asm directives, currently only ".symver", to try to avoid
1093   // corrupting asm which happens to contain the symbol name as a substring.
1094   // Note that the substitution for .symver assumes that the versioned symbol
1095   // also has an instrumented name.
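  // As an illustrative example, the directive
  //   .symver f,f@@VERS_1
  // would become
  //   .symver dfs$f,dfs$f@@VERS_1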
1096   std::string Asm = GV->getParent()->getModuleInlineAsm();
1097   std::string SearchStr = ".symver " + GVName + ",";
1098   size_t Pos = Asm.find(SearchStr);
1099   if (Pos != std::string::npos) {
1100     Asm.replace(Pos, SearchStr.size(),
1101                 ".symver " + Prefix + GVName + "," + Prefix);
1102     GV->getParent()->setModuleInlineAsm(Asm);
1103   }
1104 }
1105 
1106 Function *
1107 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
1108                                         GlobalValue::LinkageTypes NewFLink,
1109                                         FunctionType *NewFT) {
1110   FunctionType *FT = F->getFunctionType();
1111   Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
1112                                     NewFName, F->getParent());
1113   NewF->copyAttributesFrom(F);
1114   NewF->removeAttributes(
1115       AttributeList::ReturnIndex,
1116       AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
1117 
1118   BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
1119   if (F->isVarArg()) {
1120     NewF->removeAttributes(AttributeList::FunctionIndex,
1121                            AttrBuilder().addAttribute("split-stack"));
1122     CallInst::Create(DFSanVarargWrapperFn,
1123                      IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
1124                      BB);
1125     new UnreachableInst(*Ctx, BB);
1126   } else {
1127     auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
1128     std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());
1129 
1130     CallInst *CI = CallInst::Create(F, Args, "", BB);
1131     if (FT->getReturnType()->isVoidTy())
1132       ReturnInst::Create(*Ctx, BB);
1133     else
1134       ReturnInst::Create(*Ctx, CI, BB);
1135   }
1136 
1137   return NewF;
1138 }
1139 
1140 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
1141                                                           StringRef FName) {
1142   FunctionType *FTT = getTrampolineFunctionType(FT);
1143   FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
1144   Function *F = dyn_cast<Function>(C.getCallee());
1145   if (F && F->isDeclaration()) {
1146     F->setLinkage(GlobalValue::LinkOnceODRLinkage);
1147     BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
1148     std::vector<Value *> Args;
1149     Function::arg_iterator AI = F->arg_begin() + 1;
1150     for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
1151       Args.push_back(&*AI);
1152     CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
1153     Type *RetType = FT->getReturnType();
1154     ReturnInst *RI = RetType->isVoidTy() ? ReturnInst::Create(*Ctx, BB)
1155                                          : ReturnInst::Create(*Ctx, CI, BB);
1156 
1157     // F is called by a wrapped custom function with primitive shadows. So
1158     // its arguments and return value need conversion.
1159     DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
1160     Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;
1161     ++ValAI;
1162     for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {
1163       Value *Shadow =
1164           DFSF.expandFromPrimitiveShadow(ValAI->getType(), &*ShadowAI, CI);
1165       DFSF.ValShadowMap[&*ValAI] = Shadow;
1166     }
1167     Function::arg_iterator RetShadowAI = ShadowAI;
1168     const bool ShouldTrackOrigins = shouldTrackOrigins();
1169     if (ShouldTrackOrigins) {
1170       ValAI = F->arg_begin();
1171       ++ValAI;
1172       Function::arg_iterator OriginAI = ShadowAI;
1173       if (!RetType->isVoidTy())
1174         ++OriginAI;
1175       for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++OriginAI, --N) {
1176         DFSF.ValOriginMap[&*ValAI] = &*OriginAI;
1177       }
1178     }
1179     DFSanVisitor(DFSF).visitCallInst(*CI);
1180     if (!RetType->isVoidTy()) {
1181       Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(
1182           DFSF.getShadow(RI->getReturnValue()), RI);
1183       new StoreInst(PrimitiveShadow, &*RetShadowAI, RI);
1184       if (ShouldTrackOrigins) {
1185         Value *Origin = DFSF.getOrigin(RI->getReturnValue());
1186         new StoreInst(Origin, &*std::prev(F->arg_end()), RI);
1187       }
1188     }
1189   }
1190 
1191   return cast<Constant>(C.getCallee());
1192 }
1193 
// Initializes the DataFlowSanitizer runtime functions and declares them in
// the module.
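// Note: the event callback FunctionCallees (see initializeCallbackFunctions)
// are expected to be initialized before this runs, since their callees are
// registered in DFSanRuntimeFunctions below.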
1195 void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
1196   {
1197     AttributeList AL;
1198     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1199                          Attribute::NoUnwind);
1200     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1201                          Attribute::ReadNone);
1202     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1203                          Attribute::ZExt);
1204     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1205     AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
1206     DFSanUnionFn =
1207         Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
1208   }
1209   {
1210     AttributeList AL;
1211     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1212                          Attribute::NoUnwind);
1213     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1214                          Attribute::ReadNone);
1215     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1216                          Attribute::ZExt);
1217     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1218     AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
1219     DFSanCheckedUnionFn =
1220         Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
1221   }
1222   {
1223     AttributeList AL;
1224     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1225                          Attribute::NoUnwind);
1226     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1227                          Attribute::ReadOnly);
1228     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1229                          Attribute::ZExt);
1230     DFSanUnionLoadFn =
1231         Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
1232   }
1233   {
1234     AttributeList AL;
1235     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1236                          Attribute::NoUnwind);
1237     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1238                          Attribute::ReadOnly);
1239     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1240                          Attribute::ZExt);
1241     DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction(
1242         "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
1243   }
1244   {
1245     AttributeList AL;
1246     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1247                          Attribute::NoUnwind);
1248     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1249                          Attribute::ReadOnly);
1250     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1251                          Attribute::ZExt);
1252     DFSanLoadLabelAndOriginFn = Mod->getOrInsertFunction(
1253         "__dfsan_load_label_and_origin", DFSanLoadLabelAndOriginFnTy, AL);
1254   }
1255   DFSanUnimplementedFn =
1256       Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
1257   {
1258     AttributeList AL;
1259     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1260     AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
1261     DFSanSetLabelFn =
1262         Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
1263   }
1264   DFSanNonzeroLabelFn =
1265       Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
1266   DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
1267                                                   DFSanVarargWrapperFnTy);
1268   {
1269     AttributeList AL;
1270     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1271     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1272                          Attribute::ZExt);
1273     DFSanChainOriginFn = Mod->getOrInsertFunction("__dfsan_chain_origin",
1274                                                   DFSanChainOriginFnTy, AL);
1275   }
1276   DFSanMemOriginTransferFn = Mod->getOrInsertFunction(
1277       "__dfsan_mem_origin_transfer", DFSanMemOriginTransferFnTy);
1278 
1279   {
1280     AttributeList AL;
1281     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1282     AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
1283     DFSanMaybeStoreOriginFn = Mod->getOrInsertFunction(
1284         "__dfsan_maybe_store_origin", DFSanMaybeStoreOriginFnTy, AL);
1285   }
1286 
1287   DFSanRuntimeFunctions.insert(DFSanUnionFn.getCallee()->stripPointerCasts());
1288   DFSanRuntimeFunctions.insert(
1289       DFSanCheckedUnionFn.getCallee()->stripPointerCasts());
1290   DFSanRuntimeFunctions.insert(
1291       DFSanUnionLoadFn.getCallee()->stripPointerCasts());
1292   DFSanRuntimeFunctions.insert(
1293       DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts());
1294   DFSanRuntimeFunctions.insert(
1295       DFSanLoadLabelAndOriginFn.getCallee()->stripPointerCasts());
1296   DFSanRuntimeFunctions.insert(
1297       DFSanUnimplementedFn.getCallee()->stripPointerCasts());
1298   DFSanRuntimeFunctions.insert(
1299       DFSanSetLabelFn.getCallee()->stripPointerCasts());
1300   DFSanRuntimeFunctions.insert(
1301       DFSanNonzeroLabelFn.getCallee()->stripPointerCasts());
1302   DFSanRuntimeFunctions.insert(
1303       DFSanVarargWrapperFn.getCallee()->stripPointerCasts());
1304   DFSanRuntimeFunctions.insert(
1305       DFSanLoadCallbackFn.getCallee()->stripPointerCasts());
1306   DFSanRuntimeFunctions.insert(
1307       DFSanStoreCallbackFn.getCallee()->stripPointerCasts());
1308   DFSanRuntimeFunctions.insert(
1309       DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts());
1310   DFSanRuntimeFunctions.insert(
1311       DFSanCmpCallbackFn.getCallee()->stripPointerCasts());
1312   DFSanRuntimeFunctions.insert(
1313       DFSanChainOriginFn.getCallee()->stripPointerCasts());
1314   DFSanRuntimeFunctions.insert(
1315       DFSanMemOriginTransferFn.getCallee()->stripPointerCasts());
1316   DFSanRuntimeFunctions.insert(
1317       DFSanMaybeStoreOriginFn.getCallee()->stripPointerCasts());
1318 }
1319 
// Initializes event callback functions and declares them in the module
1321 void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
1322   DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
1323                                                  DFSanLoadStoreCallbackFnTy);
1324   DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback",
1325                                                   DFSanLoadStoreCallbackFnTy);
1326   DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
1327       "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
1328   DFSanCmpCallbackFn =
1329       Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
1330 }
1331 
1332 void DataFlowSanitizer::injectMetadataGlobals(Module &M) {
  // These variables can be used:
  // - by the runtime (to discover what shadow width was used during
  //   compilation)
  // - in testing (to avoid hardcoding the shadow width and type, extracting
  //   them by pattern matching instead)
1338   Type *IntTy = Type::getInt32Ty(*Ctx);
1339   (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bits", IntTy, [&] {
1340     return new GlobalVariable(
1341         M, IntTy, /*isConstant=*/true, GlobalValue::WeakODRLinkage,
1342         ConstantInt::get(IntTy, ShadowWidthBits), "__dfsan_shadow_width_bits");
1343   });
1344   (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bytes", IntTy, [&] {
1345     return new GlobalVariable(M, IntTy, /*isConstant=*/true,
1346                               GlobalValue::WeakODRLinkage,
1347                               ConstantInt::get(IntTy, ShadowWidthBytes),
1348                               "__dfsan_shadow_width_bytes");
1349   });
1350 }
1351 
1352 bool DataFlowSanitizer::runImpl(Module &M) {
1353   init(M);
1354 
1355   if (ABIList.isIn(M, "skip"))
1356     return false;
1357 
1358   const unsigned InitialGlobalSize = M.global_size();
1359   const unsigned InitialModuleSize = M.size();
1360 
1361   bool Changed = false;
1362 
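  // Helper that declares (or finds) a global and forces it into the
  // initial-exec TLS model, recording in Changed whether the module was
  // modified.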
1363   auto GetOrInsertGlobal = [this, &Changed](StringRef Name,
1364                                             Type *Ty) -> Constant * {
1365     Constant *C = Mod->getOrInsertGlobal(Name, Ty);
1366     if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {
1367       Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
1368       G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
1369     }
1370     return C;
1371   };
1372 
1373   // These globals must be kept in sync with the ones in dfsan.cpp.
1374   ArgTLS =
1375       GetOrInsertGlobal("__dfsan_arg_tls",
1376                         ArrayType::get(Type::getInt64Ty(*Ctx), ArgTLSSize / 8));
1377   RetvalTLS = GetOrInsertGlobal(
1378       "__dfsan_retval_tls",
1379       ArrayType::get(Type::getInt64Ty(*Ctx), RetvalTLSSize / 8));
1380   ArgOriginTLSTy = ArrayType::get(OriginTy, NumOfElementsInArgOrgTLS);
1381   ArgOriginTLS = GetOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy);
1382   RetvalOriginTLS = GetOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy);
1383 
1384   (void)Mod->getOrInsertGlobal("__dfsan_track_origins", OriginTy, [&] {
1385     Changed = true;
1386     return new GlobalVariable(
1387         M, OriginTy, true, GlobalValue::WeakODRLinkage,
1388         ConstantInt::getSigned(OriginTy, shouldTrackOrigins()),
1389         "__dfsan_track_origins");
1390   });
1391 
1392   injectMetadataGlobals(M);
1393 
1394   ExternalShadowMask =
1395       Mod->getOrInsertGlobal(DFSanExternShadowPtrMask, IntptrTy);
1396 
1397   initializeCallbackFunctions(M);
1398   initializeRuntimeFunctions(M);
1399 
1400   std::vector<Function *> FnsToInstrument;
1401   SmallPtrSet<Function *, 2> FnsWithNativeABI;
1402   for (Function &F : M)
1403     if (!F.isIntrinsic() && !DFSanRuntimeFunctions.contains(&F))
1404       FnsToInstrument.push_back(&F);
1405 
1406   // Give function aliases prefixes when necessary, and build wrappers where the
1407   // instrumentedness is inconsistent.
1408   for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end();
1409        AI != AE;) {
1410     GlobalAlias *GA = &*AI;
1411     ++AI;
1412     // Don't stop on weak.  We assume people aren't playing games with the
1413     // instrumentedness of overridden weak aliases.
1414     auto *F = dyn_cast<Function>(GA->getBaseObject());
1415     if (!F)
1416       continue;
1417 
1418     bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
1419     if (GAInst && FInst) {
1420       addGlobalNamePrefix(GA);
1421     } else if (GAInst != FInst) {
1422       // Non-instrumented alias of an instrumented function, or vice versa.
1423       // Replace the alias with a native-ABI wrapper of the aliasee.  The pass
1424       // below will take care of instrumenting it.
1425       Function *NewF =
1426           buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
1427       GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
1428       NewF->takeName(GA);
1429       GA->eraseFromParent();
1430       FnsToInstrument.push_back(NewF);
1431     }
1432   }
1433 
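  // Attributes stripped below from wrappers built for the TLS ABI: such
  // wrappers write argument and return shadows to TLS, so they must not be
  // marked readonly or readnone.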
1434   ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
1435       .addAttribute(Attribute::ReadNone);
1436 
1437   // First, change the ABI of every function in the module.  ABI-listed
1438   // functions keep their original ABI and get a wrapper function.
1439   for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(),
1440                                          FE = FnsToInstrument.end();
1441        FI != FE; ++FI) {
1442     Function &F = **FI;
1443     FunctionType *FT = F.getFunctionType();
1444 
1445     bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
1446                               FT->getReturnType()->isVoidTy());
1447 
1448     if (isInstrumented(&F)) {
1449       // Instrumented functions get a 'dfs$' prefix.  This allows us to more
1450       // easily identify cases of mismatching ABIs.
1451       if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
1452         FunctionType *NewFT = getArgsFunctionType(FT);
1453         Function *NewF = Function::Create(NewFT, F.getLinkage(),
1454                                           F.getAddressSpace(), "", &M);
1455         NewF->copyAttributesFrom(&F);
1456         NewF->removeAttributes(
1457             AttributeList::ReturnIndex,
1458             AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
1459         for (Function::arg_iterator FArg = F.arg_begin(),
1460                                     NewFArg = NewF->arg_begin(),
1461                                     FArgEnd = F.arg_end();
1462              FArg != FArgEnd; ++FArg, ++NewFArg) {
1463           FArg->replaceAllUsesWith(&*NewFArg);
1464         }
1465         NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
1466 
1467         for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
1468              UI != UE;) {
1469           BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
1470           ++UI;
1471           if (BA) {
1472             BA->replaceAllUsesWith(
1473                 BlockAddress::get(NewF, BA->getBasicBlock()));
1474             delete BA;
1475           }
1476         }
1477         F.replaceAllUsesWith(
1478             ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
1479         NewF->takeName(&F);
1480         F.eraseFromParent();
1481         *FI = NewF;
1482         addGlobalNamePrefix(NewF);
1483       } else {
1484         addGlobalNamePrefix(&F);
1485       }
1486     } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
1487       // Build a wrapper function for F.  The wrapper simply calls F, and is
1488       // added to FnsToInstrument so that any instrumentation according to its
1489       // WrapperKind is done in the second pass below.
1490       FunctionType *NewFT =
1491           getInstrumentedABI() == IA_Args ? getArgsFunctionType(FT) : FT;
1492 
1493       // If the function being wrapped has local linkage, then preserve the
1494       // function's linkage in the wrapper function.
1495       GlobalValue::LinkageTypes WrapperLinkage =
1496           F.hasLocalLinkage() ? F.getLinkage()
1497                               : GlobalValue::LinkOnceODRLinkage;
1498 
1499       Function *NewF = buildWrapperFunction(
1500           &F,
1501           (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
1502               std::string(F.getName()),
1503           WrapperLinkage, NewFT);
1504       if (getInstrumentedABI() == IA_TLS)
1505         NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
1506 
1507       Value *WrappedFnCst =
1508           ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
1509       F.replaceAllUsesWith(WrappedFnCst);
1510 
1511       UnwrappedFnMap[WrappedFnCst] = &F;
1512       *FI = NewF;
1513 
1514       if (!F.isDeclaration()) {
1515         // This function is probably defining an interposition of an
1516         // uninstrumented function and hence needs to keep the original ABI.
1517         // But any functions it may call need to use the instrumented ABI, so
1518         // we instrument it in a mode which preserves the original ABI.
1519         FnsWithNativeABI.insert(&F);
1520 
1521         // This code needs to rebuild the iterators, as they may be invalidated
1522         // by the push_back, taking care that the new range does not include
1523         // any functions added by this code.
1524         size_t N = FI - FnsToInstrument.begin(),
1525                Count = FE - FnsToInstrument.begin();
1526         FnsToInstrument.push_back(&F);
1527         FI = FnsToInstrument.begin() + N;
1528         FE = FnsToInstrument.begin() + Count;
1529       }
1530       // Hopefully, nobody will try to indirectly call a vararg
1531       // function... yet.
1532     } else if (FT->isVarArg()) {
1533       UnwrappedFnMap[&F] = &F;
1534       *FI = nullptr;
1535     }
1536   }
1537 
1538   for (Function *F : FnsToInstrument) {
1539     if (!F || F->isDeclaration())
1540       continue;
1541 
1542     removeUnreachableBlocks(*F);
1543 
1544     DFSanFunction DFSF(*this, F, FnsWithNativeABI.count(F));
1545 
1546     // DFSanVisitor may create new basic blocks, which confuses df_iterator.
1547     // Build a copy of the list before iterating over it.
1548     SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock()));
1549 
1550     for (BasicBlock *BB : BBList) {
1551       Instruction *Inst = &BB->front();
1552       while (true) {
1553         // DFSanVisitor may split the current basic block, changing the current
1554         // instruction's next pointer and moving the next instruction to the
1555         // tail block from which we should continue.
1556         Instruction *Next = Inst->getNextNode();
1557         // DFSanVisitor may delete Inst, so keep track of whether it was a
1558         // terminator.
1559         bool IsTerminator = Inst->isTerminator();
1560         if (!DFSF.SkipInsts.count(Inst))
1561           DFSanVisitor(DFSF).visit(Inst);
1562         if (IsTerminator)
1563           break;
1564         Inst = Next;
1565       }
1566     }
1567 
1568     // We will not necessarily be able to compute the shadow for every phi node
1569     // until we have visited every block.  Therefore, the code that handles phi
1570     // nodes adds them to the PHIFixups list so that they can be properly
1571     // handled here.
1572     for (DFSanFunction::PHIFixupElement &P : DFSF.PHIFixups) {
1573       for (unsigned Val = 0, N = P.Phi->getNumIncomingValues(); Val != N;
1574            ++Val) {
1575         P.ShadowPhi->setIncomingValue(
1576             Val, DFSF.getShadow(P.Phi->getIncomingValue(Val)));
1577         if (P.OriginPhi)
1578           P.OriginPhi->setIncomingValue(
1579               Val, DFSF.getOrigin(P.Phi->getIncomingValue(Val)));
1580       }
1581     }
1582 
1583     // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
1584     // places (i.e. instructions in basic blocks we haven't even begun visiting
1585     // yet).  To make our life easier, do this work in a pass after the main
1586     // instrumentation.
1587     if (ClDebugNonzeroLabels) {
1588       for (Value *V : DFSF.NonZeroChecks) {
1589         Instruction *Pos;
1590         if (Instruction *I = dyn_cast<Instruction>(V))
1591           Pos = I->getNextNode();
1592         else
1593           Pos = &DFSF.F->getEntryBlock().front();
1594         while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
1595           Pos = Pos->getNextNode();
1596         IRBuilder<> IRB(Pos);
1597         Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
1598         Value *Ne =
1599             IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);
1600         BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1601             Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
1602         IRBuilder<> ThenIRB(BI);
1603         ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
1604       }
1605     }
1606   }
1607 
1608   return Changed || !FnsToInstrument.empty() ||
1609          M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
1610 }
1611 
1612 Value *DFSanFunction::getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB) {
1613   Value *Base = IRB.CreatePointerCast(DFS.ArgTLS, DFS.IntptrTy);
1614   if (ArgOffset)
1615     Base = IRB.CreateAdd(Base, ConstantInt::get(DFS.IntptrTy, ArgOffset));
1616   return IRB.CreateIntToPtr(Base, PointerType::get(DFS.getShadowTy(T), 0),
1617                             "_dfsarg");
1618 }
1619 
1620 Value *DFSanFunction::getRetvalTLS(Type *T, IRBuilder<> &IRB) {
1621   return IRB.CreatePointerCast(
1622       DFS.RetvalTLS, PointerType::get(DFS.getShadowTy(T), 0), "_dfsret");
1623 }
1624 
1625 Value *DFSanFunction::getRetvalOriginTLS() { return DFS.RetvalOriginTLS; }
1626 
1627 Value *DFSanFunction::getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB) {
1628   return IRB.CreateConstGEP2_64(DFS.ArgOriginTLSTy, DFS.ArgOriginTLS, 0, ArgNo,
1629                                 "_dfsarg_o");
1630 }
1631 
1632 Value *DFSanFunction::getOrigin(Value *V) {
1633   assert(DFS.shouldTrackOrigins());
1634   if (!isa<Argument>(V) && !isa<Instruction>(V))
1635     return DFS.ZeroOrigin;
1636   Value *&Origin = ValOriginMap[V];
1637   if (!Origin) {
1638     if (Argument *A = dyn_cast<Argument>(V)) {
1639       if (IsNativeABI)
1640         return DFS.ZeroOrigin;
1641       switch (IA) {
1642       case DataFlowSanitizer::IA_TLS: {
1643         if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
1644           Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
1645           IRBuilder<> IRB(ArgOriginTLSPos);
1646           Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
1647           Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr);
1648         } else {
1649           // Overflow
1650           Origin = DFS.ZeroOrigin;
1651         }
1652         break;
1653       }
1654       case DataFlowSanitizer::IA_Args: {
1655         Origin = DFS.ZeroOrigin;
1656         break;
1657       }
1658       }
1659     } else {
1660       Origin = DFS.ZeroOrigin;
1661     }
1662   }
1663   return Origin;
1664 }
1665 
1666 void DFSanFunction::setOrigin(Instruction *I, Value *Origin) {
1667   if (!DFS.shouldTrackOrigins())
1668     return;
1669   assert(!ValOriginMap.count(I));
1670   assert(Origin->getType() == DFS.OriginTy);
1671   ValOriginMap[I] = Origin;
1672 }
1673 
1674 Value *DFSanFunction::getShadowForTLSArgument(Argument *A) {
1675   unsigned ArgOffset = 0;
1676   const DataLayout &DL = F->getParent()->getDataLayout();
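  // Walk the formal arguments in declaration order, accumulating the
  // TLS-aligned shadow size of every argument that precedes A; the running
  // total is A's byte offset into __dfsan_arg_tls.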
1677   for (auto &FArg : F->args()) {
1678     if (!FArg.getType()->isSized()) {
1679       if (A == &FArg)
1680         break;
1681       continue;
1682     }
1683 
1684     unsigned Size = DL.getTypeAllocSize(DFS.getShadowTy(&FArg));
1685     if (A != &FArg) {
1686       ArgOffset += alignTo(Size, ShadowTLSAlignment);
1687       if (ArgOffset > ArgTLSSize)
1688         break; // ArgTLS overflows, uses a zero shadow.
1689       continue;
1690     }
1691 
1692     if (ArgOffset + Size > ArgTLSSize)
1693       break; // ArgTLS overflows, uses a zero shadow.
1694 
1695     Instruction *ArgTLSPos = &*F->getEntryBlock().begin();
1696     IRBuilder<> IRB(ArgTLSPos);
1697     Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);
1698     return IRB.CreateAlignedLoad(DFS.getShadowTy(&FArg), ArgShadowPtr,
1699                                  ShadowTLSAlignment);
1700   }
1701 
1702   return DFS.getZeroShadow(A);
1703 }
1704 
1705 Value *DFSanFunction::getShadow(Value *V) {
1706   if (!isa<Argument>(V) && !isa<Instruction>(V))
1707     return DFS.getZeroShadow(V);
1708   Value *&Shadow = ValShadowMap[V];
1709   if (!Shadow) {
1710     if (Argument *A = dyn_cast<Argument>(V)) {
1711       if (IsNativeABI)
1712         return DFS.getZeroShadow(V);
1713       switch (IA) {
1714       case DataFlowSanitizer::IA_TLS: {
1715         Shadow = getShadowForTLSArgument(A);
1716         break;
1717       }
1718       case DataFlowSanitizer::IA_Args: {
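        // Under the args ABI the shadow parameters are appended after the
        // original parameters, so the shadow of argument N is passed at index
        // N + (number of original arguments), i.e. N + arg_size() / 2.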
1719         unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
1720         Function::arg_iterator Arg = F->arg_begin();
1721         std::advance(Arg, ArgIdx);
1722         Shadow = &*Arg;
1723         assert(Shadow->getType() == DFS.PrimitiveShadowTy);
1724         break;
1725       }
1726       }
1727       NonZeroChecks.push_back(Shadow);
1728     } else {
1729       Shadow = DFS.getZeroShadow(V);
1730     }
1731   }
1732   return Shadow;
1733 }
1734 
1735 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
1736   assert(!ValShadowMap.count(I));
1737   assert(DFS.shouldTrackFieldsAndIndices() ||
1738          Shadow->getType() == DFS.PrimitiveShadowTy);
1739   ValShadowMap[I] = Shadow;
1740 }
1741 
1742 Value *DataFlowSanitizer::getShadowOffset(Value *Addr, IRBuilder<> &IRB) {
1743   // Returns Addr & shadow_mask
1744   assert(Addr != RetvalTLS && "Reinstrumenting?");
1745   Value *ShadowPtrMaskValue;
1746   if (DFSanRuntimeShadowMask)
1747     ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
1748   else
1749     ShadowPtrMaskValue = ShadowPtrMask;
1750   return IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
1751                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy));
1752 }
1753 
1754 std::pair<Value *, Value *>
1755 DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
1756                                           Instruction *Pos) {
  // Returns ((Addr & shadow_mask) + origin_base) & ~(MinOriginAlignment - 1)
1758   IRBuilder<> IRB(Pos);
1759   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1760   Value *ShadowPtr = IRB.CreateIntToPtr(
1761       IRB.CreateMul(ShadowOffset, ShadowPtrMul), PrimitiveShadowPtrTy);
1762   Value *OriginPtr = nullptr;
1763   if (shouldTrackOrigins()) {
1764     Value *OriginLong = IRB.CreateAdd(ShadowOffset, OriginBase);
1765     const Align Alignment = llvm::assumeAligned(InstAlignment.value());
    // When the alignment is >= 4, Addr must be 4-byte aligned (otherwise the
    // access is UB), so masking is unnecessary.
1768     if (Alignment < MinOriginAlignment) {
1769       uint64_t Mask = MinOriginAlignment.value() - 1;
1770       OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(IntptrTy, ~Mask));
1771     }
1772     OriginPtr = IRB.CreateIntToPtr(OriginLong, OriginPtrTy);
1773   }
1774   return {ShadowPtr, OriginPtr};
1775 }
1776 
1777 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
1778   // Returns (Addr & shadow_mask) x 2
1779   IRBuilder<> IRB(Pos);
1780   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1781   return IRB.CreateIntToPtr(IRB.CreateMul(ShadowOffset, ShadowPtrMul),
1782                             PrimitiveShadowPtrTy);
1783 }
1784 
1785 Value *DFSanFunction::combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
1786                                                 Instruction *Pos) {
1787   Value *PrimitiveValue = combineShadows(V1, V2, Pos);
1788   return expandFromPrimitiveShadow(T, PrimitiveValue, Pos);
1789 }
1790 
// Generates IR to compute the union of the two given shadows, inserting it
// before Pos. The combined value has a primitive type.
1793 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
1794   if (DFS.isZeroShadow(V1))
1795     return collapseToPrimitiveShadow(V2, Pos);
1796   if (DFS.isZeroShadow(V2))
1797     return collapseToPrimitiveShadow(V1, Pos);
1798   if (V1 == V2)
1799     return collapseToPrimitiveShadow(V1, Pos);
1800 
1801   auto V1Elems = ShadowElements.find(V1);
1802   auto V2Elems = ShadowElements.find(V2);
1803   if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
1804     if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
1805                       V2Elems->second.begin(), V2Elems->second.end())) {
1806       return collapseToPrimitiveShadow(V1, Pos);
1807     }
1808     if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
1809                       V1Elems->second.begin(), V1Elems->second.end())) {
1810       return collapseToPrimitiveShadow(V2, Pos);
1811     }
1812   } else if (V1Elems != ShadowElements.end()) {
1813     if (V1Elems->second.count(V2))
1814       return collapseToPrimitiveShadow(V1, Pos);
1815   } else if (V2Elems != ShadowElements.end()) {
1816     if (V2Elems->second.count(V1))
1817       return collapseToPrimitiveShadow(V2, Pos);
1818   }
1819 
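  // Canonicalize the operand order so that (V1, V2) and (V2, V1) share a
  // cache entry, and reuse a previously computed union only if its defining
  // block dominates Pos.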
1820   auto Key = std::make_pair(V1, V2);
1821   if (V1 > V2)
1822     std::swap(Key.first, Key.second);
1823   CachedShadow &CCS = CachedShadows[Key];
1824   if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
1825     return CCS.Shadow;
1826 
  // Convert the input shadows to shadows with primitive types.
1828   Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
1829   Value *PV2 = collapseToPrimitiveShadow(V2, Pos);
1830 
1831   IRBuilder<> IRB(Pos);
1832   if (ClFast16Labels) {
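    // With fast16 labels the union of two shadows is simply their bitwise OR.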
1833     CCS.Block = Pos->getParent();
1834     CCS.Shadow = IRB.CreateOr(PV1, PV2);
1835   } else if (AvoidNewBlocks) {
1836     CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {PV1, PV2});
1837     Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1838     Call->addParamAttr(0, Attribute::ZExt);
1839     Call->addParamAttr(1, Attribute::ZExt);
1840 
1841     CCS.Block = Pos->getParent();
1842     CCS.Shadow = Call;
1843   } else {
1844     BasicBlock *Head = Pos->getParent();
1845     Value *Ne = IRB.CreateICmpNE(PV1, PV2);
1846     BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1847         Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
1848     IRBuilder<> ThenIRB(BI);
1849     CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {PV1, PV2});
1850     Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1851     Call->addParamAttr(0, Attribute::ZExt);
1852     Call->addParamAttr(1, Attribute::ZExt);
1853 
1854     BasicBlock *Tail = BI->getSuccessor(0);
1855     PHINode *Phi =
1856         PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front());
1857     Phi->addIncoming(Call, Call->getParent());
1858     Phi->addIncoming(PV1, Head);
1859 
1860     CCS.Block = Tail;
1861     CCS.Shadow = Phi;
1862   }
1863 
1864   std::set<Value *> UnionElems;
1865   if (V1Elems != ShadowElements.end()) {
1866     UnionElems = V1Elems->second;
1867   } else {
1868     UnionElems.insert(V1);
1869   }
1870   if (V2Elems != ShadowElements.end()) {
1871     UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1872   } else {
1873     UnionElems.insert(V2);
1874   }
1875   ShadowElements[CCS.Shadow] = std::move(UnionElems);
1876 
1877   return CCS.Shadow;
1878 }
1879 
1880 // A convenience function which folds the shadows of each of the operands
1881 // of the provided instruction Inst, inserting the IR before Inst.  Returns
1882 // the computed union Value.
1883 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1884   if (Inst->getNumOperands() == 0)
1885     return DFS.getZeroShadow(Inst);
1886 
1887   Value *Shadow = getShadow(Inst->getOperand(0));
1888   for (unsigned I = 1, N = Inst->getNumOperands(); I < N; ++I)
1889     Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(I)), Inst);
1890 
1891   return expandFromPrimitiveShadow(Inst->getType(), Shadow, Inst);
1892 }
1893 
1894 void DFSanVisitor::visitInstOperands(Instruction &I) {
1895   Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1896   DFSF.setShadow(&I, CombinedShadow);
1897   visitInstOperandOrigins(I);
1898 }
1899 
1900 Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
1901                                      const std::vector<Value *> &Origins,
1902                                      Instruction *Pos, ConstantInt *Zero) {
1903   assert(Shadows.size() == Origins.size());
1904   size_t Size = Origins.size();
1905   if (Size == 0)
1906     return DFS.ZeroOrigin;
1907   Value *Origin = nullptr;
1908   if (!Zero)
1909     Zero = DFS.ZeroPrimitiveShadow;
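  // Fold the origins left to right: operands whose origin is a constant zero
  // are skipped, and at runtime a later operand's origin takes precedence
  // whenever its shadow is nonzero.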
1910   for (size_t I = 0; I != Size; ++I) {
1911     Value *OpOrigin = Origins[I];
1912     Constant *ConstOpOrigin = dyn_cast<Constant>(OpOrigin);
1913     if (ConstOpOrigin && ConstOpOrigin->isNullValue())
1914       continue;
1915     if (!Origin) {
1916       Origin = OpOrigin;
1917       continue;
1918     }
1919     Value *OpShadow = Shadows[I];
1920     Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
1921     IRBuilder<> IRB(Pos);
1922     Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
1923     Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1924   }
1925   return Origin ? Origin : DFS.ZeroOrigin;
1926 }
1927 
1928 Value *DFSanFunction::combineOperandOrigins(Instruction *Inst) {
1929   size_t Size = Inst->getNumOperands();
1930   std::vector<Value *> Shadows(Size);
1931   std::vector<Value *> Origins(Size);
1932   for (unsigned I = 0; I != Size; ++I) {
1933     Shadows[I] = getShadow(Inst->getOperand(I));
1934     Origins[I] = getOrigin(Inst->getOperand(I));
1935   }
1936   return combineOrigins(Shadows, Origins, Inst);
1937 }
1938 
1939 void DFSanVisitor::visitInstOperandOrigins(Instruction &I) {
1940   if (!DFSF.DFS.shouldTrackOrigins())
1941     return;
1942   Value *CombinedOrigin = DFSF.combineOperandOrigins(&I);
1943   DFSF.setOrigin(&I, CombinedOrigin);
1944 }
1945 
1946 Align DFSanFunction::getShadowAlign(Align InstAlignment) {
1947   const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);
1948   return Align(Alignment.value() * DFS.ShadowWidthBytes);
1949 }
1950 
1951 Align DFSanFunction::getOriginAlign(Align InstAlignment) {
1952   const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1953   return Align(std::max(MinOriginAlignment, Alignment));
1954 }
1955 
1956 bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
1957                                                   Align InstAlignment) {
1958   assert(Size != 0);
1959   // * if Size == 1, it is sufficient to load its origin aligned at 4.
  // * if Size == 2, we assume that in most cases Addr % 2 == 0, so it is
  //   sufficient to load its origin aligned at 4. If not, origins may be
  //   lost, but this should not happen very often.
1963   // * if align >= 4, Addr must be aligned to 4, otherwise it is UB. When
1964   //   Size % 4 == 0, it is more efficient to load origins without callbacks.
1965   // * Otherwise we use __dfsan_load_label_and_origin.
1966   // This should ensure that common cases run efficiently.
1967   if (Size <= 2)
1968     return false;
1969 
1970   const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1971   if (Alignment >= MinOriginAlignment &&
1972       Size % (64 / DFS.ShadowWidthBits) == 0)
1973     return false;
1974 
1975   return true;
1976 }
1977 
1978 std::pair<Value *, Value *> DFSanFunction::loadFast16ShadowFast(
1979     Value *ShadowAddr, Value *OriginAddr, uint64_t Size, Align ShadowAlign,
1980     Align OriginAlign, Value *FirstOrigin, Instruction *Pos) {
1981   // First OR all the WideShadows, then OR individual shadows within the
1982   // combined WideShadow. This is fewer instructions than ORing shadows
1983   // individually.
1984   const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();
1985   std::vector<Value *> Shadows;
1986   std::vector<Value *> Origins;
1987   IRBuilder<> IRB(Pos);
1988   Value *WideAddr =
1989       IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1990   Value *CombinedWideShadow =
1991       IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
1992   if (ShouldTrackOrigins) {
1993     Shadows.push_back(CombinedWideShadow);
1994     Origins.push_back(FirstOrigin);
1995   }
1996   for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
1997        Ofs += 64 / DFS.ShadowWidthBits) {
1998     WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1999                              ConstantInt::get(DFS.IntptrTy, 1));
2000     Value *NextWideShadow =
2001         IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
2002     CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
2003     if (ShouldTrackOrigins) {
2004       Shadows.push_back(NextWideShadow);
2005       OriginAddr = IRB.CreateGEP(DFS.OriginTy, OriginAddr,
2006                                  ConstantInt::get(DFS.IntptrTy, 1));
2007       Origins.push_back(
2008           IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign));
2009     }
2010   }
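  // Fold the combined 64-bit shadow down to a single primitive shadow by
  // repeatedly ORing the upper half into the lower half, then truncate.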
2011   for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
2012     Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
2013     CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
2014   }
2015   return {IRB.CreateTrunc(CombinedWideShadow, DFS.PrimitiveShadowTy),
2016           ShouldTrackOrigins
2017               ? combineOrigins(Shadows, Origins, Pos,
2018                                ConstantInt::getSigned(IRB.getInt64Ty(), 0))
2019               : DFS.ZeroOrigin};
2020 }
2021 
2022 Value *DFSanFunction::loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
2023                                            Align ShadowAlign,
2024                                            Instruction *Pos) {
2025   // Fast path for the common case where each byte has identical shadow: load
2026   // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
2027   // shadow is non-equal.
2028   BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
2029   IRBuilder<> FallbackIRB(FallbackBB);
2030   CallInst *FallbackCall = FallbackIRB.CreateCall(
2031       DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
2032   FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
2033 
2034   // Compare each of the shadows stored in the loaded 64 bits to each other,
2035   // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
2036   IRBuilder<> IRB(Pos);
2037   Value *WideAddr =
2038       IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
2039   Value *WideShadow =
2040       IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
2041   Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.PrimitiveShadowTy);
2042   Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
2043   Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
2044   Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
2045   Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
2046 
2047   BasicBlock *Head = Pos->getParent();
2048   BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
2049 
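  // splitBasicBlock moved Head's terminator into Tail, so Tail takes over
  // Head's children in the dominator tree.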
2050   if (DomTreeNode *OldNode = DT.getNode(Head)) {
2051     std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
2052 
2053     DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
2054     for (auto *Child : Children)
2055       DT.changeImmediateDominator(Child, NewNode);
2056   }
2057 
2058   // In the following code LastBr will refer to the previous basic block's
2059   // conditional branch instruction, whose true successor is fixed up to point
2060   // to the next block during the loop below or to the tail after the final
2061   // iteration.
2062   BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
2063   ReplaceInstWithInst(Head->getTerminator(), LastBr);
2064   DT.addNewBlock(FallbackBB, Head);
2065 
2066   for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
2067        Ofs += 64 / DFS.ShadowWidthBits) {
2068     BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
2069     DT.addNewBlock(NextBB, LastBr->getParent());
2070     IRBuilder<> NextIRB(NextBB);
2071     WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
2072                                  ConstantInt::get(DFS.IntptrTy, 1));
2073     Value *NextWideShadow =
2074         NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(), WideAddr, ShadowAlign);
2075     ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
2076     LastBr->setSuccessor(0, NextBB);
2077     LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
2078   }
2079 
2080   LastBr->setSuccessor(0, Tail);
2081   FallbackIRB.CreateBr(Tail);
2082   PHINode *Shadow =
2083       PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front());
2084   Shadow->addIncoming(FallbackCall, FallbackBB);
2085   Shadow->addIncoming(TruncShadow, LastBr->getParent());
2086   return Shadow;
2087 }
2088 
// Generates IR to load the shadow corresponding to bytes [Addr, Addr+Size),
// where Addr has alignment InstAlignment, and takes the union of those
// shadows. The returned shadow always has primitive type.
2092 std::pair<Value *, Value *> DFSanFunction::loadShadowOrigin(Value *Addr,
2093                                                             uint64_t Size,
2094                                                             Align InstAlignment,
2095                                                             Instruction *Pos) {
2096   const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();
2097 
2098   // Non-escaped loads.
2099   if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
2100     const auto SI = AllocaShadowMap.find(AI);
2101     if (SI != AllocaShadowMap.end()) {
2102       IRBuilder<> IRB(Pos);
2103       Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
2104       const auto OI = AllocaOriginMap.find(AI);
2105       assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());
2106       return {ShadowLI, ShouldTrackOrigins
2107                             ? IRB.CreateLoad(DFS.OriginTy, OI->second)
2108                             : nullptr};
2109     }
2110   }
2111 
2112   // Load from constant addresses.
2113   SmallVector<const Value *, 2> Objs;
2114   getUnderlyingObjects(Addr, Objs);
2115   bool AllConstants = true;
2116   for (const Value *Obj : Objs) {
2117     if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
2118       continue;
2119     if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
2120       continue;
2121 
2122     AllConstants = false;
2123     break;
2124   }
2125   if (AllConstants)
2126     return {DFS.ZeroPrimitiveShadow,
2127             ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};
2128 
2129   if (Size == 0)
2130     return {DFS.ZeroPrimitiveShadow,
2131             ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};
2132 
  // Use a callback to load if this is not an optimizable case for origin
  // tracking.
2135   if (ShouldTrackOrigins &&
2136       useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
2137     IRBuilder<> IRB(Pos);
2138     CallInst *Call =
2139         IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
2140                        {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
2141                         ConstantInt::get(DFS.IntptrTy, Size)});
2142     Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
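    // Unpack the returned value: the label is in the bits above
    // OriginWidthBits and the origin is in the low bits.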
2143     return {IRB.CreateTrunc(IRB.CreateLShr(Call, DFS.OriginWidthBits),
2144                             DFS.PrimitiveShadowTy),
2145             IRB.CreateTrunc(Call, DFS.OriginTy)};
2146   }
2147 
2148   // Other cases that support loading shadows or origins in a fast way.
2149   Value *ShadowAddr, *OriginAddr;
2150   std::tie(ShadowAddr, OriginAddr) =
2151       DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
2152 
2153   const Align ShadowAlign = getShadowAlign(InstAlignment);
2154   const Align OriginAlign = getOriginAlign(InstAlignment);
2155   Value *Origin = nullptr;
2156   if (ShouldTrackOrigins) {
2157     IRBuilder<> IRB(Pos);
2158     Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
2159   }
2160 
2161   switch (Size) {
2162   case 1: {
2163     LoadInst *LI = new LoadInst(DFS.PrimitiveShadowTy, ShadowAddr, "", Pos);
2164     LI->setAlignment(ShadowAlign);
2165     return {LI, Origin};
2166   }
2167   case 2: {
2168     IRBuilder<> IRB(Pos);
2169     Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
2170                                        ConstantInt::get(DFS.IntptrTy, 1));
2171     Value *Load =
2172         IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr, ShadowAlign);
2173     Value *Load1 =
2174         IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr1, ShadowAlign);
2175     return {combineShadows(Load, Load1, Pos), Origin};
2176   }
2177   }
2178 
2179   if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0)
2180     return loadFast16ShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
2181                                 OriginAlign, Origin, Pos);
2182 
2183   if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0)
2184     return {loadLegacyShadowFast(ShadowAddr, Size, ShadowAlign, Pos), Origin};
2185 
2186   IRBuilder<> IRB(Pos);
2187   FunctionCallee &UnionLoadFn =
2188       ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
2189   CallInst *FallbackCall = IRB.CreateCall(
2190       UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
2191   FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
2192   return {FallbackCall, Origin};
2193 }
2194 
2195 static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {
2196   switch (AO) {
2197   case AtomicOrdering::NotAtomic:
2198     return AtomicOrdering::NotAtomic;
2199   case AtomicOrdering::Unordered:
2200   case AtomicOrdering::Monotonic:
2201   case AtomicOrdering::Acquire:
2202     return AtomicOrdering::Acquire;
2203   case AtomicOrdering::Release:
2204   case AtomicOrdering::AcquireRelease:
2205     return AtomicOrdering::AcquireRelease;
2206   case AtomicOrdering::SequentiallyConsistent:
2207     return AtomicOrdering::SequentiallyConsistent;
2208   }
2209   llvm_unreachable("Unknown ordering");
2210 }
2211 
2212 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
2213   auto &DL = LI.getModule()->getDataLayout();
2214   uint64_t Size = DL.getTypeStoreSize(LI.getType());
2215   if (Size == 0) {
2216     DFSF.setShadow(&LI, DFSF.DFS.getZeroShadow(&LI));
2217     DFSF.setOrigin(&LI, DFSF.DFS.ZeroOrigin);
2218     return;
2219   }
2220 
  // When an application load is atomic, increase the atomic ordering between
  // atomic application loads and stores to ensure happens-before order; load
  // shadow data after application data; store zero shadow data before
  // application data. This ensures shadow loads return either labels of the
  // initial application data or zeros.
2226   if (LI.isAtomic())
2227     LI.setOrdering(addAcquireOrdering(LI.getOrdering()));
2228 
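  // For atomic loads the shadow must be loaded after the application load
  // (see the comment above), so insert the instrumentation after LI;
  // otherwise insert it before LI.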
2229   Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI;
2230   std::vector<Value *> Shadows;
2231   std::vector<Value *> Origins;
2232   Value *PrimitiveShadow, *Origin;
2233   std::tie(PrimitiveShadow, Origin) =
2234       DFSF.loadShadowOrigin(LI.getPointerOperand(), Size, LI.getAlign(), Pos);
2235   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2236   if (ShouldTrackOrigins) {
2237     Shadows.push_back(PrimitiveShadow);
2238     Origins.push_back(Origin);
2239   }
2240   if (ClCombinePointerLabelsOnLoad) {
2241     Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
2242     PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos);
2243     if (ShouldTrackOrigins) {
2244       Shadows.push_back(PtrShadow);
2245       Origins.push_back(DFSF.getOrigin(LI.getPointerOperand()));
2246     }
2247   }
2248   if (!DFSF.DFS.isZeroShadow(PrimitiveShadow))
2249     DFSF.NonZeroChecks.push_back(PrimitiveShadow);
2250 
2251   Value *Shadow =
2252       DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos);
2253   DFSF.setShadow(&LI, Shadow);
2254 
2255   if (ShouldTrackOrigins) {
2256     DFSF.setOrigin(&LI, DFSF.combineOrigins(Shadows, Origins, Pos));
2257   }
2258 
2259   if (ClEventCallbacks) {
2260     IRBuilder<> IRB(Pos);
2261     Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
2262     IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
2263   }
2264 }
2265 
2266 Value *DFSanFunction::updateOrigin(Value *V, IRBuilder<> &IRB) {
2267   if (!DFS.shouldTrackOrigins())
2268     return V;
2269   return IRB.CreateCall(DFS.DFSanChainOriginFn, V);
2270 }
2271 
2272 Value *DFSanFunction::originToIntptr(IRBuilder<> &IRB, Value *Origin) {
2273   const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
2274   const DataLayout &DL = F->getParent()->getDataLayout();
2275   unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
2276   if (IntptrSize == OriginSize)
2277     return Origin;
2278   assert(IntptrSize == OriginSize * 2);
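  // The pointer-sized integer is twice the origin width, so duplicate the
  // origin into both halves; a single intptr-sized store then paints two
  // origin slots at once.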
2279   Origin = IRB.CreateIntCast(Origin, DFS.IntptrTy, /* isSigned */ false);
2280   return IRB.CreateOr(Origin, IRB.CreateShl(Origin, OriginSize * 8));
2281 }
2282 
2283 void DFSanFunction::paintOrigin(IRBuilder<> &IRB, Value *Origin,
2284                                 Value *StoreOriginAddr,
2285                                 uint64_t StoreOriginSize, Align Alignment) {
2286   const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
2287   const DataLayout &DL = F->getParent()->getDataLayout();
2288   const Align IntptrAlignment = DL.getABITypeAlign(DFS.IntptrTy);
2289   unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
2290   assert(IntptrAlignment >= MinOriginAlignment);
2291   assert(IntptrSize >= OriginSize);
2292 
2293   unsigned Ofs = 0;
2294   Align CurrentAlignment = Alignment;
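  // If the destination is sufficiently aligned, paint origins in
  // pointer-sized chunks first; any remaining origin slots are handled by the
  // origin-sized loop below.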
2295   if (Alignment >= IntptrAlignment && IntptrSize > OriginSize) {
2296     Value *IntptrOrigin = originToIntptr(IRB, Origin);
2297     Value *IntptrStoreOriginPtr = IRB.CreatePointerCast(
2298         StoreOriginAddr, PointerType::get(DFS.IntptrTy, 0));
2299     for (unsigned I = 0; I < StoreOriginSize / IntptrSize; ++I) {
2300       Value *Ptr =
2301           I ? IRB.CreateConstGEP1_32(DFS.IntptrTy, IntptrStoreOriginPtr, I)
2302             : IntptrStoreOriginPtr;
2303       IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
2304       Ofs += IntptrSize / OriginSize;
2305       CurrentAlignment = IntptrAlignment;
2306     }
2307   }
2308 
2309   for (unsigned I = Ofs; I < (StoreOriginSize + OriginSize - 1) / OriginSize;
2310        ++I) {
2311     Value *GEP = I ? IRB.CreateConstGEP1_32(DFS.OriginTy, StoreOriginAddr, I)
2312                    : StoreOriginAddr;
2313     IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
2314     CurrentAlignment = MinOriginAlignment;
2315   }
2316 }
2317 
2318 Value *DFSanFunction::convertToBool(Value *V, IRBuilder<> &IRB,
2319                                     const Twine &Name) {
2320   Type *VTy = V->getType();
2321   assert(VTy->isIntegerTy());
2322   if (VTy->getIntegerBitWidth() == 1)
2323     // Just converting a bool to a bool, so do nothing.
2324     return V;
2325   return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), Name);
2326 }
2327 
2328 void DFSanFunction::storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size,
2329                                 Value *Shadow, Value *Origin,
2330                                 Value *StoreOriginAddr, Align InstAlignment) {
2331   // Do not write origins for zero shadows because we do not trace origins for
2332   // untainted sinks.
2333   const Align OriginAlignment = getOriginAlign(InstAlignment);
2334   Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
2335   IRBuilder<> IRB(Pos);
2336   if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
2337     if (!ConstantShadow->isZeroValue())
2338       paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
2339                   OriginAlignment);
2340     return;
2341   }
2342 
2343   if (shouldInstrumentWithCall()) {
2344     IRB.CreateCall(DFS.DFSanMaybeStoreOriginFn,
2345                    {CollapsedShadow,
2346                     IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
2347                     ConstantInt::get(DFS.IntptrTy, Size), Origin});
2348   } else {
2349     Value *Cmp = convertToBool(CollapsedShadow, IRB, "_dfscmp");
2350     Instruction *CheckTerm = SplitBlockAndInsertIfThen(
2351         Cmp, &*IRB.GetInsertPoint(), false, DFS.OriginStoreWeights, &DT);
2352     IRBuilder<> IRBNew(CheckTerm);
2353     paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), StoreOriginAddr, Size,
2354                 OriginAlignment);
2355     ++NumOriginStores;
2356   }
2357 }
2358 
2359 void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
2360                                              Align ShadowAlign,
2361                                              Instruction *Pos) {
2362   IRBuilder<> IRB(Pos);
2363   IntegerType *ShadowTy =
2364       IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
2365   Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
2366   Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
2367   Value *ExtShadowAddr =
2368       IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
2369   IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
2370   // Do not write origins for 0 shadows because we do not trace origins for
2371   // untainted sinks.
2372 }
2373 
2374 void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
2375                                                Align InstAlignment,
2376                                                Value *PrimitiveShadow,
2377                                                Value *Origin,
2378                                                Instruction *Pos) {
2379   const bool ShouldTrackOrigins = DFS.shouldTrackOrigins() && Origin;
2380 
2381   if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
2382     const auto SI = AllocaShadowMap.find(AI);
2383     if (SI != AllocaShadowMap.end()) {
2384       IRBuilder<> IRB(Pos);
2385       IRB.CreateStore(PrimitiveShadow, SI->second);
2386 
2387       // Do not write origins for 0 shadows because we do not trace origins for
2388       // untainted sinks.
2389       if (ShouldTrackOrigins && !DFS.isZeroShadow(PrimitiveShadow)) {
2390         const auto OI = AllocaOriginMap.find(AI);
2391         assert(OI != AllocaOriginMap.end() && Origin);
2392         IRB.CreateStore(Origin, OI->second);
2393       }
2394       return;
2395     }
2396   }
2397 
2398   const Align ShadowAlign = getShadowAlign(InstAlignment);
2399   if (DFS.isZeroShadow(PrimitiveShadow)) {
2400     storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
2401     return;
2402   }
2403 
2404   IRBuilder<> IRB(Pos);
2405   Value *ShadowAddr, *OriginAddr;
2406   std::tie(ShadowAddr, OriginAddr) =
2407       DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
2408 
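  // Store the shadow in 128-bit vector chunks while enough bytes remain, then
  // fall back to storing one primitive shadow per application byte for the
  // tail.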
2409   const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
2410   uint64_t Offset = 0;
2411   uint64_t LeftSize = Size;
2412   if (LeftSize >= ShadowVecSize) {
2413     auto *ShadowVecTy =
2414         FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize);
2415     Value *ShadowVec = UndefValue::get(ShadowVecTy);
2416     for (unsigned I = 0; I != ShadowVecSize; ++I) {
2417       ShadowVec = IRB.CreateInsertElement(
2418           ShadowVec, PrimitiveShadow,
2419           ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I));
2420     }
2421     Value *ShadowVecAddr =
2422         IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
2423     do {
2424       Value *CurShadowVecAddr =
2425           IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
2426       IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
2427       LeftSize -= ShadowVecSize;
2428       ++Offset;
2429     } while (LeftSize >= ShadowVecSize);
2430     Offset *= ShadowVecSize;
2431   }
2432   while (LeftSize > 0) {
2433     Value *CurShadowAddr =
2434         IRB.CreateConstGEP1_32(DFS.PrimitiveShadowTy, ShadowAddr, Offset);
2435     IRB.CreateAlignedStore(PrimitiveShadow, CurShadowAddr, ShadowAlign);
2436     --LeftSize;
2437     ++Offset;
2438   }
2439 
2440   if (ShouldTrackOrigins) {
2441     storeOrigin(Pos, Addr, Size, PrimitiveShadow, Origin, OriginAddr,
2442                 InstAlignment);
2443   }
2444 }
2445 
2446 static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
2447   switch (AO) {
2448   case AtomicOrdering::NotAtomic:
2449     return AtomicOrdering::NotAtomic;
2450   case AtomicOrdering::Unordered:
2451   case AtomicOrdering::Monotonic:
2452   case AtomicOrdering::Release:
2453     return AtomicOrdering::Release;
2454   case AtomicOrdering::Acquire:
2455   case AtomicOrdering::AcquireRelease:
2456     return AtomicOrdering::AcquireRelease;
2457   case AtomicOrdering::SequentiallyConsistent:
2458     return AtomicOrdering::SequentiallyConsistent;
2459   }
2460   llvm_unreachable("Unknown ordering");
2461 }
2462 
2463 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
2464   auto &DL = SI.getModule()->getDataLayout();
2465   Value *Val = SI.getValueOperand();
2466   uint64_t Size = DL.getTypeStoreSize(Val->getType());
2467   if (Size == 0)
2468     return;
2469 
  // When an application store is atomic, increase the atomic ordering between
  // atomic application loads and stores to ensure happens-before order; load
  // shadow data after application data; store zero shadow data before
  // application data. This ensures shadow loads return either labels of the
  // initial application data or zeros.
2475   if (SI.isAtomic())
2476     SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
2477 
2478   const bool ShouldTrackOrigins =
2479       DFSF.DFS.shouldTrackOrigins() && !SI.isAtomic();
2480   std::vector<Value *> Shadows;
2481   std::vector<Value *> Origins;
2482 
2483   Value *Shadow =
2484       SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
2485 
2486   if (ShouldTrackOrigins) {
2487     Shadows.push_back(Shadow);
2488     Origins.push_back(DFSF.getOrigin(Val));
2489   }
2490 
2491   Value *PrimitiveShadow;
2492   if (ClCombinePointerLabelsOnStore) {
2493     Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
2494     if (ShouldTrackOrigins) {
2495       Shadows.push_back(PtrShadow);
2496       Origins.push_back(DFSF.getOrigin(SI.getPointerOperand()));
2497     }
2498     PrimitiveShadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
2499   } else {
2500     PrimitiveShadow = DFSF.collapseToPrimitiveShadow(Shadow, &SI);
2501   }
2502   Value *Origin = nullptr;
2503   if (ShouldTrackOrigins)
2504     Origin = DFSF.combineOrigins(Shadows, Origins, &SI);
2505   DFSF.storePrimitiveShadowOrigin(SI.getPointerOperand(), Size, SI.getAlign(),
2506                                   PrimitiveShadow, Origin, &SI);
2507   if (ClEventCallbacks) {
2508     IRBuilder<> IRB(&SI);
2509     Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
2510     IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8});
2511   }
2512 }
2513 
2514 void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) {
2515   assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2516 
2517   Value *Val = I.getOperand(1);
2518   const auto &DL = I.getModule()->getDataLayout();
2519   uint64_t Size = DL.getTypeStoreSize(Val->getType());
2520   if (Size == 0)
2521     return;
2522 
  // Conservatively store zero shadow at the accessed address and give the
  // result a zero shadow, to prevent shadow data races.
2525   IRBuilder<> IRB(&I);
2526   Value *Addr = I.getOperand(0);
2527   const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment);
2528   DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, &I);
2529   DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I));
2530   DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2531 }
2532 
2533 void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
2534   visitCASOrRMW(I.getAlign(), I);
2535   // TODO: The ordering change follows MSan. It is possible not to change
2536   // ordering because we always set and use 0 shadows.
2537   I.setOrdering(addReleaseOrdering(I.getOrdering()));
2538 }
2539 
2540 void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2541   visitCASOrRMW(I.getAlign(), I);
2542   // TODO: The ordering change follows MSan. It is possible not to change
2543   // ordering because we always set and use 0 shadows.
2544   I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2545 }
2546 
2547 void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
2548   visitInstOperands(UO);
2549 }
2550 
2551 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
2552   visitInstOperands(BO);
2553 }
2554 
2555 void DFSanVisitor::visitCastInst(CastInst &CI) { visitInstOperands(CI); }
2556 
2557 void DFSanVisitor::visitCmpInst(CmpInst &CI) {
2558   visitInstOperands(CI);
2559   if (ClEventCallbacks) {
2560     IRBuilder<> IRB(&CI);
2561     Value *CombinedShadow = DFSF.getShadow(&CI);
2562     IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
2563   }
2564 }
2565 
2566 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
2567   visitInstOperands(GEPI);
2568 }
2569 
2570 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
2571   visitInstOperands(I);
2572 }
2573 
2574 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
2575   visitInstOperands(I);
2576 }
2577 
2578 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
2579   visitInstOperands(I);
2580 }
2581 
2582 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
2583   if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
2584     visitInstOperands(I);
2585     return;
2586   }
2587 
2588   IRBuilder<> IRB(&I);
2589   Value *Agg = I.getAggregateOperand();
2590   Value *AggShadow = DFSF.getShadow(Agg);
2591   Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2592   DFSF.setShadow(&I, ResShadow);
2593   visitInstOperandOrigins(I);
2594 }
2595 
2596 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
2597   if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
2598     visitInstOperands(I);
2599     return;
2600   }
2601 
2602   IRBuilder<> IRB(&I);
2603   Value *AggShadow = DFSF.getShadow(I.getAggregateOperand());
2604   Value *InsShadow = DFSF.getShadow(I.getInsertedValueOperand());
2605   Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2606   DFSF.setShadow(&I, Res);
2607   visitInstOperandOrigins(I);
2608 }
2609 
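// If an alloca is only ever used as the address of loads and stores (its
// address never escapes), keep its shadow (and origin) in a dedicated stack
// slot instead of shadow memory; loads and stores through the alloca then
// consult AllocaShadowMap/AllocaOriginMap directly.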
2610 void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
2611   bool AllLoadsStores = true;
2612   for (User *U : I.users()) {
2613     if (isa<LoadInst>(U))
2614       continue;
2615 
2616     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
2617       if (SI->getPointerOperand() == &I)
2618         continue;
2619     }
2620 
2621     AllLoadsStores = false;
2622     break;
2623   }
2624   if (AllLoadsStores) {
2625     IRBuilder<> IRB(&I);
2626     DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.PrimitiveShadowTy);
2627     if (DFSF.DFS.shouldTrackOrigins()) {
2628       DFSF.AllocaOriginMap[&I] =
2629           IRB.CreateAlloca(DFSF.DFS.OriginTy, nullptr, "_dfsa");
2630     }
2631   }
2632   DFSF.setShadow(&I, DFSF.DFS.ZeroPrimitiveShadow);
2633   DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2634 }
2635 
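// Shadow of a select: a vector condition has no single branch to model, so
// conservatively union both arms; for a scalar condition, select between the
// arm shadows using the same condition, and optionally fold in the condition's
// own shadow when ClTrackSelectControlFlow is set.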
2636 void DFSanVisitor::visitSelectInst(SelectInst &I) {
2637   Value *CondShadow = DFSF.getShadow(I.getCondition());
2638   Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
2639   Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
2640   Value *ShadowSel = nullptr;
2641   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2642   std::vector<Value *> Shadows;
2643   std::vector<Value *> Origins;
2644   Value *TrueOrigin =
2645       ShouldTrackOrigins ? DFSF.getOrigin(I.getTrueValue()) : nullptr;
2646   Value *FalseOrigin =
2647       ShouldTrackOrigins ? DFSF.getOrigin(I.getFalseValue()) : nullptr;
2648 
2649   if (isa<VectorType>(I.getCondition()->getType())) {
2650     ShadowSel = DFSF.combineShadowsThenConvert(I.getType(), TrueShadow,
2651                                                FalseShadow, &I);
2652     if (ShouldTrackOrigins) {
2653       Shadows.push_back(TrueShadow);
2654       Shadows.push_back(FalseShadow);
2655       Origins.push_back(TrueOrigin);
2656       Origins.push_back(FalseOrigin);
2657     }
2658   } else {
2659     if (TrueShadow == FalseShadow) {
2660       ShadowSel = TrueShadow;
2661       if (ShouldTrackOrigins) {
2662         Shadows.push_back(TrueShadow);
2663         Origins.push_back(TrueOrigin);
2664       }
2665     } else {
2666       ShadowSel =
2667           SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
2668       if (ShouldTrackOrigins) {
2669         Shadows.push_back(ShadowSel);
2670         Origins.push_back(SelectInst::Create(I.getCondition(), TrueOrigin,
2671                                              FalseOrigin, "", &I));
2672       }
2673     }
2674   }
2675   DFSF.setShadow(&I, ClTrackSelectControlFlow
2676                          ? DFSF.combineShadowsThenConvert(
2677                                I.getType(), CondShadow, ShadowSel, &I)
2678                          : ShadowSel);
2679   if (ShouldTrackOrigins) {
2680     if (ClTrackSelectControlFlow) {
2681       Shadows.push_back(CondShadow);
2682       Origins.push_back(DFSF.getOrigin(I.getCondition()));
2683     }
2684     DFSF.setOrigin(&I, DFSF.combineOrigins(Shadows, Origins, &I));
2685   }
2686 }
2687 
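// For memset, every byte of the destination receives the shadow (and origin)
// of the stored value, so delegate to the runtime set-label function instead
// of emitting inline shadow stores.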
2688 void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
2689   IRBuilder<> IRB(&I);
2690   Value *ValShadow = DFSF.getShadow(I.getValue());
2691   Value *ValOrigin = DFSF.DFS.shouldTrackOrigins()
2692                          ? DFSF.getOrigin(I.getValue())
2693                          : DFSF.DFS.ZeroOrigin;
2694   IRB.CreateCall(
2695       DFSF.DFS.DFSanSetLabelFn,
2696       {ValShadow, ValOrigin,
2697        IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*DFSF.DFS.Ctx)),
2698        IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
2699 }
2700 
2701 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
2702   IRBuilder<> IRB(&I);
2703 
  // CopyOrMoveOrigin transfers origins by referring to their shadows, so we
  // need to move origins before moving shadows.
2706   if (DFSF.DFS.shouldTrackOrigins()) {
2707     IRB.CreateCall(
2708         DFSF.DFS.DFSanMemOriginTransferFn,
2709         {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2710          IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2711          IRB.CreateIntCast(I.getArgOperand(2), DFSF.DFS.IntptrTy, false)});
2712   }
2713 
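  // The shadow of the copied range is transferred by calling the same
  // memcpy/memmove intrinsic on the shadow region, with the length scaled by
  // ShadowWidthBytes since each application byte has a multi-byte shadow.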
2714   Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
2715   Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
2716   Value *LenShadow =
2717       IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
2718                                                     DFSF.DFS.ShadowWidthBytes));
2719   Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
2720   Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
2721   SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
2722   auto *MTI = cast<MemTransferInst>(
2723       IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
2724                      {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
2725   if (ClPreserveAlignment) {
2726     MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
2727     MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
2728   } else {
2729     MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
2730     MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
2731   }
2732   if (ClEventCallbacks) {
2733     IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
2734                    {RawDestShadow, I.getLength()});
2735   }
2736 }
2737 
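// Return shadows: under the TLS ABI, the return shadow (and origin) is written
// to thread-local storage for the caller to read back; under the args ABI, the
// function returns an aggregate of {original value, shadow}, so the return
// operand is rewritten accordingly.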
2738 void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
2739   if (!DFSF.IsNativeABI && RI.getReturnValue()) {
2740     switch (DFSF.IA) {
2741     case DataFlowSanitizer::IA_TLS: {
2742       Value *S = DFSF.getShadow(RI.getReturnValue());
2743       IRBuilder<> IRB(&RI);
2744       Type *RT = DFSF.F->getFunctionType()->getReturnType();
2745       unsigned Size =
2746           getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
2747       if (Size <= RetvalTLSSize) {
        // If the return shadow does not fit in the TLS buffer, nothing is
        // stored; at the call site, oversized return shadows are set to zero.
2750         IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB),
2751                                ShadowTLSAlignment);
2752       }
2753       if (DFSF.DFS.shouldTrackOrigins()) {
2754         Value *O = DFSF.getOrigin(RI.getReturnValue());
2755         IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
2756       }
2757       break;
2758     }
2759     case DataFlowSanitizer::IA_Args: {
2760       IRBuilder<> IRB(&RI);
2761       Type *RT = DFSF.F->getFunctionType()->getReturnType();
2762       Value *InsVal =
2763           IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
2764       Value *InsShadow =
2765           IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
2766       RI.setOperand(0, InsShadow);
2767       break;
2768     }
2769     }
2770   }
2771 }
2772 
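// For custom (__dfsw_/__dfso_) wrappers, shadow arguments are appended after
// the original arguments: one primitive shadow per fixed parameter, a pointer
// into an on-stack array of shadows for any variadic arguments, and, for
// non-void functions, a pointer to a "labelreturn" slot that the wrapper
// fills with the return shadow.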
2773 void DFSanVisitor::addShadowArguments(Function &F, CallBase &CB,
2774                                       std::vector<Value *> &Args,
2775                                       IRBuilder<> &IRB) {
2776   FunctionType *FT = F.getFunctionType();
2777 
2778   auto *I = CB.arg_begin();
2779 
2780   // Adds non-variable argument shadows.
2781   for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
2782     Args.push_back(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB));
2783 
2784   // Adds variable argument shadows.
2785   if (FT->isVarArg()) {
2786     auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
2787                                      CB.arg_size() - FT->getNumParams());
2788     auto *LabelVAAlloca =
2789         new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
2790                        "labelva", &DFSF.F->getEntryBlock().front());
2791 
2792     for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
2793       auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
2794       IRB.CreateStore(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB),
2795                       LabelVAPtr);
2796     }
2797 
2798     Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
2799   }
2800 
2801   // Adds the return value shadow.
2802   if (!FT->getReturnType()->isVoidTy()) {
2803     if (!DFSF.LabelReturnAlloca) {
2804       DFSF.LabelReturnAlloca = new AllocaInst(
2805           DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
2806           "labelreturn", &DFSF.F->getEntryBlock().front());
2807     }
2808     Args.push_back(DFSF.LabelReturnAlloca);
2809   }
2810 }
2811 
2812 void DFSanVisitor::addOriginArguments(Function &F, CallBase &CB,
2813                                       std::vector<Value *> &Args,
2814                                       IRBuilder<> &IRB) {
2815   FunctionType *FT = F.getFunctionType();
2816 
2817   auto *I = CB.arg_begin();
2818 
2819   // Add non-variable argument origins.
2820   for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
2821     Args.push_back(DFSF.getOrigin(*I));
2822 
2823   // Add variable argument origins.
2824   if (FT->isVarArg()) {
2825     auto *OriginVATy =
2826         ArrayType::get(DFSF.DFS.OriginTy, CB.arg_size() - FT->getNumParams());
2827     auto *OriginVAAlloca =
2828         new AllocaInst(OriginVATy, getDataLayout().getAllocaAddrSpace(),
2829                        "originva", &DFSF.F->getEntryBlock().front());
2830 
2831     for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
2832       auto *OriginVAPtr = IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, N);
2833       IRB.CreateStore(DFSF.getOrigin(*I), OriginVAPtr);
2834     }
2835 
2836     Args.push_back(IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, 0));
2837   }
2838 
2839   // Add the return value origin.
2840   if (!FT->getReturnType()->isVoidTy()) {
2841     if (!DFSF.OriginReturnAlloca) {
2842       DFSF.OriginReturnAlloca = new AllocaInst(
2843           DFSF.DFS.OriginTy, getDataLayout().getAllocaAddrSpace(),
2844           "originreturn", &DFSF.F->getEntryBlock().front());
2845     }
2846     Args.push_back(DFSF.OriginReturnAlloca);
2847   }
2848 }
2849 
2850 bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
2851   IRBuilder<> IRB(&CB);
2852   switch (DFSF.DFS.getWrapperKind(&F)) {
2853   case DataFlowSanitizer::WK_Warning:
2854     CB.setCalledFunction(&F);
2855     IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
2856                    IRB.CreateGlobalStringPtr(F.getName()));
2857     DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
2858     DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
2859     return true;
2860   case DataFlowSanitizer::WK_Discard:
2861     CB.setCalledFunction(&F);
2862     DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
2863     DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
2864     return true;
2865   case DataFlowSanitizer::WK_Functional:
2866     CB.setCalledFunction(&F);
2867     visitInstOperands(CB);
2868     return true;
2869   case DataFlowSanitizer::WK_Custom:
    // Don't try to handle invokes of custom functions; it's too complicated.
    // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
    // wrapper.
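    // For illustration only (assuming 16-bit labels), a call to `int f(int)`
    // is roughly rewritten into a call to
    //   i32 @__dfsw_f(i32 %v, i16 zeroext %v_label, i16* %ret_label)
    // with __dfso_f additionally taking origin arguments when origin tracking
    // is enabled.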
2873     CallInst *CI = dyn_cast<CallInst>(&CB);
2874     if (!CI)
2875       return false;
2876 
2877     const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2878     FunctionType *FT = F.getFunctionType();
2879     TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
2880     std::string CustomFName = ShouldTrackOrigins ? "__dfso_" : "__dfsw_";
2881     CustomFName += F.getName();
2882     FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
2883         CustomFName, CustomFn.TransformedType);
2884     if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
2885       CustomFn->copyAttributesFrom(&F);
2886 
2887       // Custom functions returning non-void will write to the return label.
2888       if (!FT->getReturnType()->isVoidTy()) {
2889         CustomFn->removeAttributes(AttributeList::FunctionIndex,
2890                                    DFSF.DFS.ReadOnlyNoneAttrs);
2891       }
2892     }
2893 
2894     std::vector<Value *> Args;
2895 
2896     // Adds non-variable arguments.
2897     auto *I = CB.arg_begin();
2898     for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {
2899       Type *T = (*I)->getType();
2900       FunctionType *ParamFT;
2901       if (isa<PointerType>(T) &&
2902           (ParamFT = dyn_cast<FunctionType>(T->getPointerElementType()))) {
2903         std::string TName = "dfst";
2904         TName += utostr(FT->getNumParams() - N);
2905         TName += "$";
2906         TName += F.getName();
2907         Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
2908         Args.push_back(T);
2909         Args.push_back(
2910             IRB.CreateBitCast(*I, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
2911       } else {
2912         Args.push_back(*I);
2913       }
2914     }
2915 
2916     // Adds shadow arguments.
2917     const unsigned ShadowArgStart = Args.size();
2918     addShadowArguments(F, CB, Args, IRB);
2919 
2920     // Adds origin arguments.
2921     const unsigned OriginArgStart = Args.size();
2922     if (ShouldTrackOrigins)
2923       addOriginArguments(F, CB, Args, IRB);
2924 
2925     // Adds variable arguments.
2926     append_range(Args, drop_begin(CB.args(), FT->getNumParams()));
2927 
2928     CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
2929     CustomCI->setCallingConv(CI->getCallingConv());
2930     CustomCI->setAttributes(transformFunctionAttributes(
2931         CustomFn, CI->getContext(), CI->getAttributes()));
2932 
2933     // Update the parameter attributes of the custom call instruction to
2934     // zero extend the shadow parameters. This is required for targets
2935     // which consider PrimitiveShadowTy an illegal type.
2936     for (unsigned N = 0; N < FT->getNumParams(); N++) {
2937       const unsigned ArgNo = ShadowArgStart + N;
2938       if (CustomCI->getArgOperand(ArgNo)->getType() ==
2939           DFSF.DFS.PrimitiveShadowTy)
2940         CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
2941       if (ShouldTrackOrigins) {
2942         const unsigned OriginArgNo = OriginArgStart + N;
2943         if (CustomCI->getArgOperand(OriginArgNo)->getType() ==
2944             DFSF.DFS.OriginTy)
2945           CustomCI->addParamAttr(OriginArgNo, Attribute::ZExt);
2946       }
2947     }
2948 
2949     // Loads the return value shadow and origin.
2950     if (!FT->getReturnType()->isVoidTy()) {
2951       LoadInst *LabelLoad =
2952           IRB.CreateLoad(DFSF.DFS.PrimitiveShadowTy, DFSF.LabelReturnAlloca);
2953       DFSF.setShadow(CustomCI, DFSF.expandFromPrimitiveShadow(
2954                                    FT->getReturnType(), LabelLoad, &CB));
2955       if (ShouldTrackOrigins) {
2956         LoadInst *OriginLoad =
2957             IRB.CreateLoad(DFSF.DFS.OriginTy, DFSF.OriginReturnAlloca);
2958         DFSF.setOrigin(CustomCI, OriginLoad);
2959       }
2960     }
2961 
2962     CI->replaceAllUsesWith(CustomCI);
2963     CI->eraseFromParent();
2964     return true;
2965   }
2966   return false;
2967 }
2968 
2969 void DFSanVisitor::visitCallBase(CallBase &CB) {
2970   Function *F = CB.getCalledFunction();
2971   if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
2972     visitInstOperands(CB);
2973     return;
2974   }
2975 
2976   // Calls to this function are synthesized in wrappers, and we shouldn't
2977   // instrument them.
2978   if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
2979     return;
2980 
2981   DenseMap<Value *, Function *>::iterator UnwrappedFnIt =
2982       DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
2983   if (UnwrappedFnIt != DFSF.DFS.UnwrappedFnMap.end())
2984     if (visitWrappedCallBase(*UnwrappedFnIt->second, CB))
2985       return;
2986 
2987   IRBuilder<> IRB(&CB);
2988 
2989   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2990   FunctionType *FT = CB.getFunctionType();
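  // Under the TLS ABI, argument shadows (and origins, if tracked) are passed
  // through fixed-size thread-local buffers rather than as extra parameters;
  // anything that does not fit is treated as zero on the callee side.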
2991   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
2992     // Stores argument shadows.
2993     unsigned ArgOffset = 0;
2994     const DataLayout &DL = getDataLayout();
2995     for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
2996       if (ShouldTrackOrigins) {
        // Skip origins that would overflow the origin TLS buffer, and skip
        // the store when the argument shadow is zero.
2998         Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
2999         if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
3000             !DFSF.DFS.isZeroShadow(ArgShadow))
3001           IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
3002                           DFSF.getArgOriginTLS(I, IRB));
3003       }
3004 
3005       unsigned Size =
3006           DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
      // Stop storing once the accumulated argument size overflows ArgTLSSize;
      // inside the callee, arguments past the overflow point get zero shadows.
3009       if (ArgOffset + Size > ArgTLSSize)
3010         break;
3011       IRB.CreateAlignedStore(
3012           DFSF.getShadow(CB.getArgOperand(I)),
3013           DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
3014           ShadowTLSAlignment);
3015       ArgOffset += alignTo(Size, ShadowTLSAlignment);
3016     }
3017   }
3018 
3019   Instruction *Next = nullptr;
3020   if (!CB.getType()->isVoidTy()) {
3021     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3022       if (II->getNormalDest()->getSinglePredecessor()) {
3023         Next = &II->getNormalDest()->front();
3024       } else {
3025         BasicBlock *NewBB =
3026             SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
3027         Next = &NewBB->front();
3028       }
3029     } else {
3030       assert(CB.getIterator() != CB.getParent()->end());
3031       Next = CB.getNextNode();
3032     }
3033 
3034     if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
3035       // Loads the return value shadow.
3036       IRBuilder<> NextIRB(Next);
3037       const DataLayout &DL = getDataLayout();
3038       unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
3039       if (Size > RetvalTLSSize) {
3040         // Set overflowed return shadow to be zero.
3041         DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
3042       } else {
3043         LoadInst *LI = NextIRB.CreateAlignedLoad(
3044             DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
3045             ShadowTLSAlignment, "_dfsret");
3046         DFSF.SkipInsts.insert(LI);
3047         DFSF.setShadow(&CB, LI);
3048         DFSF.NonZeroChecks.push_back(LI);
3049       }
3050 
3051       if (ShouldTrackOrigins) {
3052         LoadInst *LI = NextIRB.CreateLoad(
3053             DFSF.DFS.OriginTy, DFSF.getRetvalOriginTLS(), "_dfsret_o");
3054         DFSF.SkipInsts.insert(LI);
3055         DFSF.setOrigin(&CB, LI);
3056       }
3057     }
3058   }
3059 
  // Do all instrumentation for IA_Args down here so that the CFG tampering it
  // performs happens only after the SplitEdge call above has run.
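  // The callee is cast to a type that takes the original arguments followed by
  // one shadow per fixed argument (plus a pointer to an on-stack shadow array
  // for variadic arguments) and returns {original return value, shadow}; the
  // pair is unpacked with extractvalue below.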
3062   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
3063     FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
3064     Value *Func =
3065         IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
3066 
3067     const unsigned NumParams = FT->getNumParams();
3068 
3069     // Copy original arguments.
3070     auto *ArgIt = CB.arg_begin(), *ArgEnd = CB.arg_end();
3071     std::vector<Value *> Args(NumParams);
3072     std::copy_n(ArgIt, NumParams, Args.begin());
3073 
3074     // Add shadow arguments by transforming original arguments.
3075     std::generate_n(std::back_inserter(Args), NumParams,
3076                     [&]() { return DFSF.getShadow(*ArgIt++); });
3077 
3078     if (FT->isVarArg()) {
3079       unsigned VarArgSize = CB.arg_size() - NumParams;
3080       ArrayType *VarArgArrayTy =
3081           ArrayType::get(DFSF.DFS.PrimitiveShadowTy, VarArgSize);
3082       AllocaInst *VarArgShadow =
3083           new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
3084                          "", &DFSF.F->getEntryBlock().front());
3085       Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
3086 
3087       // Copy remaining var args.
3088       unsigned GepIndex = 0;
3089       std::for_each(ArgIt, ArgEnd, [&](Value *Arg) {
3090         IRB.CreateStore(
3091             DFSF.getShadow(Arg),
3092             IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, GepIndex++));
3093         Args.push_back(Arg);
3094       });
3095     }
3096 
3097     CallBase *NewCB;
3098     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3099       NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
3100                                II->getUnwindDest(), Args);
3101     } else {
3102       NewCB = IRB.CreateCall(NewFT, Func, Args);
3103     }
3104     NewCB->setCallingConv(CB.getCallingConv());
3105     NewCB->setAttributes(CB.getAttributes().removeAttributes(
3106         *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
3107         AttributeFuncs::typeIncompatible(NewCB->getType())));
3108 
3109     if (Next) {
3110       ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
3111       DFSF.SkipInsts.insert(ExVal);
3112       ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
3113       DFSF.SkipInsts.insert(ExShadow);
3114       DFSF.setShadow(ExVal, ExShadow);
3115       DFSF.NonZeroChecks.push_back(ExShadow);
3116 
3117       CB.replaceAllUsesWith(ExVal);
3118     }
3119 
3120     CB.eraseFromParent();
3121   }
3122 }
3123 
3124 void DFSanVisitor::visitPHINode(PHINode &PN) {
3125   Type *ShadowTy = DFSF.DFS.getShadowTy(&PN);
3126   PHINode *ShadowPN =
3127       PHINode::Create(ShadowTy, PN.getNumIncomingValues(), "", &PN);
3128 
3129   // Give the shadow phi node valid predecessors to fool SplitEdge into working.
3130   Value *UndefShadow = UndefValue::get(ShadowTy);
3131   for (BasicBlock *BB : PN.blocks())
3132     ShadowPN->addIncoming(UndefShadow, BB);
3133 
3134   DFSF.setShadow(&PN, ShadowPN);
3135 
3136   PHINode *OriginPN = nullptr;
3137   if (DFSF.DFS.shouldTrackOrigins()) {
3138     OriginPN =
3139         PHINode::Create(DFSF.DFS.OriginTy, PN.getNumIncomingValues(), "", &PN);
3140     Value *UndefOrigin = UndefValue::get(DFSF.DFS.OriginTy);
3141     for (BasicBlock *BB : PN.blocks())
3142       OriginPN->addIncoming(UndefOrigin, BB);
3143     DFSF.setOrigin(&PN, OriginPN);
3144   }
3145 
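  // Record the phi so that the undef incoming shadows/origins can be replaced
  // with the real ones once shadows have been computed for all incoming
  // values.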
3146   DFSF.PHIFixups.push_back({&PN, ShadowPN, OriginPN});
3147 }
3148 
3149 namespace {
3150 class DataFlowSanitizerLegacyPass : public ModulePass {
3151 private:
3152   std::vector<std::string> ABIListFiles;
3153 
3154 public:
3155   static char ID;
3156 
3157   DataFlowSanitizerLegacyPass(
3158       const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
3159       : ModulePass(ID), ABIListFiles(ABIListFiles) {}
3160 
3161   bool runOnModule(Module &M) override {
3162     return DataFlowSanitizer(ABIListFiles).runImpl(M);
3163   }
3164 };
3165 } // namespace
3166 
3167 char DataFlowSanitizerLegacyPass::ID;
3168 
3169 INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
3170                 "DataFlowSanitizer: dynamic data flow analysis.", false, false)
3171 
3172 ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
3173     const std::vector<std::string> &ABIListFiles) {
3174   return new DataFlowSanitizerLegacyPass(ABIListFiles);
3175 }
3176 
3177 PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
3178                                              ModuleAnalysisManager &AM) {
3179   if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
3180     return PreservedAnalyses::none();
3181   }
3182   return PreservedAnalyses::all();
3183 }
3184