1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11 /// analysis.
12 ///
13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14 /// class of bugs on its own.  Instead, it provides a generic dynamic data flow
15 /// analysis framework to be used by clients to help detect application-specific
16 /// issues within their own code.
17 ///
18 /// The analysis is based on automatic propagation of data flow labels (also
19 /// known as taint labels) through a program as it performs computation.  Each
20 /// byte of application memory is backed by two bytes of shadow memory which
21 /// hold the label.  On Linux/x86_64, memory is laid out as follows:
22 ///
23 /// +--------------------+ 0x800000000000 (top of memory)
24 /// | application memory |
25 /// +--------------------+ 0x700000008000 (kAppAddr)
26 /// |                    |
27 /// |       unused       |
28 /// |                    |
29 /// +--------------------+ 0x300200000000 (kUnusedAddr)
30 /// |    union table     |
31 /// +--------------------+ 0x300000000000 (kUnionTableAddr)
32 /// |       origin       |
33 /// +--------------------+ 0x200000008000 (kOriginAddr)
34 /// |   shadow memory    |
35 /// +--------------------+ 0x000000010000 (kShadowAddr)
36 /// | reserved by kernel |
37 /// +--------------------+ 0x000000000000
38 ///
39 /// To derive a shadow memory address from an application memory address,
40 /// bits 44-46 are cleared to bring the address into the range
41 /// [0x000000008000,0x100000000000).  Then the address is shifted left by 1 to
42 /// account for the double byte representation of shadow labels and move the
43 /// address into the shadow memory range.  See the function
44 /// DataFlowSanitizer::getShadowAddress below.
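///
/// As an illustrative example (the authoritative computation is in
/// getShadowAddress), an application address such as 0x700000010000 maps to
/// (0x700000010000 & ~0x700000000000) << 1 = 0x000000010000 << 1
/// = 0x000000020000, which lies within the shadow memory region.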
45 ///
46 /// For more information, please refer to the design document:
47 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
48 //
49 //===----------------------------------------------------------------------===//
50 
51 #include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
52 #include "llvm/ADT/DenseMap.h"
53 #include "llvm/ADT/DenseSet.h"
54 #include "llvm/ADT/DepthFirstIterator.h"
55 #include "llvm/ADT/None.h"
56 #include "llvm/ADT/SmallPtrSet.h"
57 #include "llvm/ADT/SmallVector.h"
58 #include "llvm/ADT/StringExtras.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Triple.h"
61 #include "llvm/ADT/iterator.h"
62 #include "llvm/Analysis/ValueTracking.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/Constant.h"
67 #include "llvm/IR/Constants.h"
68 #include "llvm/IR/DataLayout.h"
69 #include "llvm/IR/DerivedTypes.h"
70 #include "llvm/IR/Dominators.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalAlias.h"
73 #include "llvm/IR/GlobalValue.h"
74 #include "llvm/IR/GlobalVariable.h"
75 #include "llvm/IR/IRBuilder.h"
76 #include "llvm/IR/InlineAsm.h"
77 #include "llvm/IR/InstVisitor.h"
78 #include "llvm/IR/InstrTypes.h"
79 #include "llvm/IR/Instruction.h"
80 #include "llvm/IR/Instructions.h"
81 #include "llvm/IR/IntrinsicInst.h"
82 #include "llvm/IR/LLVMContext.h"
83 #include "llvm/IR/MDBuilder.h"
84 #include "llvm/IR/Module.h"
85 #include "llvm/IR/PassManager.h"
86 #include "llvm/IR/Type.h"
87 #include "llvm/IR/User.h"
88 #include "llvm/IR/Value.h"
89 #include "llvm/InitializePasses.h"
90 #include "llvm/Pass.h"
91 #include "llvm/Support/Alignment.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CommandLine.h"
94 #include "llvm/Support/ErrorHandling.h"
95 #include "llvm/Support/SpecialCaseList.h"
96 #include "llvm/Support/VirtualFileSystem.h"
97 #include "llvm/Transforms/Instrumentation.h"
98 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
99 #include "llvm/Transforms/Utils/Local.h"
100 #include <algorithm>
101 #include <cassert>
102 #include <cstddef>
103 #include <cstdint>
104 #include <iterator>
105 #include <memory>
106 #include <set>
107 #include <string>
108 #include <utility>
109 #include <vector>
110 
111 using namespace llvm;
112 
113 // This must be consistent with ShadowWidthBits.
114 static const Align ShadowTLSAlignment = Align(2);
115 
116 static const Align MinOriginAlignment = Align(4);
117 
// The sizes of the TLS variables. These constants must be kept in sync with the ones
119 // in dfsan.cpp.
120 static const unsigned ArgTLSSize = 800;
121 static const unsigned RetvalTLSSize = 800;
122 
123 // External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer, the
// runtime will set the external mask based on the VMA range.
126 const char DFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";
127 
128 // The -dfsan-preserve-alignment flag controls whether this pass assumes that
129 // alignment requirements provided by the input IR are correct.  For example,
130 // if the input IR contains a load with alignment 8, this flag will cause
131 // the shadow load to have alignment 16.  This flag is disabled by default as
132 // we have unfortunately encountered too much code (including Clang itself;
133 // see PR14291) which performs misaligned access.
134 static cl::opt<bool> ClPreserveAlignment(
135     "dfsan-preserve-alignment",
136     cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
137     cl::init(false));
138 
139 // The ABI list files control how shadow parameters are passed. The pass treats
140 // every function labelled "uninstrumented" in the ABI list file as conforming
141 // to the "native" (i.e. unsanitized) ABI.  Unless the ABI list contains
142 // additional annotations for those functions, a call to one of those functions
143 // will produce a warning message, as the labelling behaviour of the function is
144 // unknown.  The other supported annotations are "functional" and "discard",
145 // which are described below under DataFlowSanitizer::WrapperKind.
146 static cl::list<std::string> ClABIListFiles(
147     "dfsan-abilist",
148     cl::desc("File listing native ABI functions and how the pass treats them"),
149     cl::Hidden);
150 
151 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
152 // functions (see DataFlowSanitizer::InstrumentedABI below).
153 static cl::opt<bool>
154     ClArgsABI("dfsan-args-abi",
155               cl::desc("Use the argument ABI rather than the TLS ABI"),
156               cl::Hidden);
157 
158 // Controls whether the pass includes or ignores the labels of pointers in load
159 // instructions.
160 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
161     "dfsan-combine-pointer-labels-on-load",
162     cl::desc("Combine the label of the pointer with the label of the data when "
163              "loading from memory."),
164     cl::Hidden, cl::init(true));
165 
166 // Controls whether the pass includes or ignores the labels of pointers in
// store instructions.
168 static cl::opt<bool> ClCombinePointerLabelsOnStore(
169     "dfsan-combine-pointer-labels-on-store",
170     cl::desc("Combine the label of the pointer with the label of the data when "
171              "storing in memory."),
172     cl::Hidden, cl::init(false));
173 
174 static cl::opt<bool> ClDebugNonzeroLabels(
175     "dfsan-debug-nonzero-labels",
176     cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
177              "load or return with a nonzero label"),
178     cl::Hidden);
179 
180 // Experimental feature that inserts callbacks for certain data events.
181 // Currently callbacks are only inserted for loads, stores, memory transfers
182 // (i.e. memcpy and memmove), and comparisons.
183 //
184 // If this flag is set to true, the user must provide definitions for the
185 // following callback functions:
186 //   void __dfsan_load_callback(dfsan_label Label, void* addr);
187 //   void __dfsan_store_callback(dfsan_label Label, void* addr);
188 //   void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
189 //   void __dfsan_cmp_callback(dfsan_label CombinedLabel);
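//
// As a purely illustrative sketch (the body is hypothetical; only the
// signature above is prescribed), a client might define:
//
//   extern "C" void __dfsan_cmp_callback(dfsan_label CombinedLabel) {
//     if (CombinedLabel != 0) {
//       // React to a comparison on tainted data, e.g. record it.
//     }
//   }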
190 static cl::opt<bool> ClEventCallbacks(
191     "dfsan-event-callbacks",
192     cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
193     cl::Hidden, cl::init(false));
194 
195 // Use a distinct bit for each base label, enabling faster unions with less
196 // instrumentation.  Limits the max number of base labels to 16.
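// With a distinct bit per base label, the union of two labels reduces to a
// bitwise OR, e.g. union(0x1, 0x4) = 0x5, avoiding the union-table lookup.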
197 static cl::opt<bool> ClFast16Labels(
198     "dfsan-fast-16-labels",
199     cl::desc("Use more efficient instrumentation, limiting the number of "
200              "labels to 16."),
201     cl::Hidden, cl::init(false));
202 
203 // Controls whether the pass tracks the control flow of select instructions.
204 static cl::opt<bool> ClTrackSelectControlFlow(
205     "dfsan-track-select-control-flow",
206     cl::desc("Propagate labels from condition values of select instructions "
207              "to results."),
208     cl::Hidden, cl::init(true));
209 
210 // TODO: This default value follows MSan. DFSan may use a different value.
211 static cl::opt<int> ClInstrumentWithCallThreshold(
212     "dfsan-instrument-with-call-threshold",
213     cl::desc("If the function being instrumented requires more than "
214              "this number of origin stores, use callbacks instead of "
215              "inline checks (-1 means never use callbacks)."),
216     cl::Hidden, cl::init(3500));
217 
218 // Controls how to track origins.
219 // * 0: do not track origins.
220 // * 1: track origins at memory store operations.
221 // * 2: TODO: track origins at memory store operations and callsites.
222 static cl::opt<int> ClTrackOrigins("dfsan-track-origins",
223                                    cl::desc("Track origins of labels"),
224                                    cl::Hidden, cl::init(0));
225 
226 static StringRef getGlobalTypeString(const GlobalValue &G) {
  // A GlobalValue's type is always a pointer type; use the value (pointee) type.
228   Type *GType = G.getValueType();
229   // For now we support excluding struct types only.
230   if (StructType *SGType = dyn_cast<StructType>(GType)) {
231     if (!SGType->isLiteral())
232       return SGType->getName();
233   }
234   return "<unknown type>";
235 }
236 
237 namespace {
238 
239 class DFSanABIList {
240   std::unique_ptr<SpecialCaseList> SCL;
241 
242 public:
243   DFSanABIList() = default;
244 
245   void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
246 
  /// Returns whether either this function or its source file is listed in the
248   /// given category.
249   bool isIn(const Function &F, StringRef Category) const {
250     return isIn(*F.getParent(), Category) ||
251            SCL->inSection("dataflow", "fun", F.getName(), Category);
252   }
253 
254   /// Returns whether this global alias is listed in the given category.
255   ///
256   /// If GA aliases a function, the alias's name is matched as a function name
257   /// would be.  Similarly, aliases of globals are matched like globals.
258   bool isIn(const GlobalAlias &GA, StringRef Category) const {
259     if (isIn(*GA.getParent(), Category))
260       return true;
261 
262     if (isa<FunctionType>(GA.getValueType()))
263       return SCL->inSection("dataflow", "fun", GA.getName(), Category);
264 
265     return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
266            SCL->inSection("dataflow", "type", getGlobalTypeString(GA),
267                           Category);
268   }
269 
270   /// Returns whether this module is listed in the given category.
271   bool isIn(const Module &M, StringRef Category) const {
272     return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
273   }
274 };
275 
276 /// TransformedFunction is used to express the result of transforming one
277 /// function type into another.  This struct is immutable.  It holds metadata
278 /// useful for updating calls of the old function to the new type.
279 struct TransformedFunction {
280   TransformedFunction(FunctionType *OriginalType, FunctionType *TransformedType,
281                       std::vector<unsigned> ArgumentIndexMapping)
282       : OriginalType(OriginalType), TransformedType(TransformedType),
283         ArgumentIndexMapping(ArgumentIndexMapping) {}
284 
285   // Disallow copies.
286   TransformedFunction(const TransformedFunction &) = delete;
287   TransformedFunction &operator=(const TransformedFunction &) = delete;
288 
289   // Allow moves.
290   TransformedFunction(TransformedFunction &&) = default;
291   TransformedFunction &operator=(TransformedFunction &&) = default;
292 
293   /// Type of the function before the transformation.
294   FunctionType *OriginalType;
295 
296   /// Type of the function after the transformation.
297   FunctionType *TransformedType;
298 
299   /// Transforming a function may change the position of arguments.  This
300   /// member records the mapping from each argument's old position to its new
301   /// position.  Argument positions are zero-indexed.  If the transformation
302   /// from F to F' made the first argument of F into the third argument of F',
303   /// then ArgumentIndexMapping[0] will equal 2.
304   std::vector<unsigned> ArgumentIndexMapping;
305 };
306 
307 /// Given function attributes from a call site for the original function,
308 /// return function attributes appropriate for a call to the transformed
309 /// function.
310 AttributeList
311 transformFunctionAttributes(const TransformedFunction &TransformedFunction,
312                             LLVMContext &Ctx, AttributeList CallSiteAttrs) {
313 
314   // Construct a vector of AttributeSet for each function argument.
315   std::vector<llvm::AttributeSet> ArgumentAttributes(
316       TransformedFunction.TransformedType->getNumParams());
317 
318   // Copy attributes from the parameter of the original function to the
319   // transformed version.  'ArgumentIndexMapping' holds the mapping from
320   // old argument position to new.
321   for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();
322        I < IE; ++I) {
323     unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];
324     ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(I);
325   }
326 
327   // Copy annotations on varargs arguments.
328   for (unsigned I = TransformedFunction.OriginalType->getNumParams(),
329                 IE = CallSiteAttrs.getNumAttrSets();
330        I < IE; ++I) {
331     ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(I));
332   }
333 
334   return AttributeList::get(Ctx, CallSiteAttrs.getFnAttributes(),
335                             CallSiteAttrs.getRetAttributes(),
336                             llvm::makeArrayRef(ArgumentAttributes));
337 }
338 
339 class DataFlowSanitizer {
340   friend struct DFSanFunction;
341   friend class DFSanVisitor;
342 
343   enum {
344     ShadowWidthBits = 16,
345     ShadowWidthBytes = ShadowWidthBits / 8,
346     OriginWidthBits = 32,
347     OriginWidthBytes = OriginWidthBits / 8
348   };
349 
350   /// Which ABI should be used for instrumented functions?
351   enum InstrumentedABI {
352     /// Argument and return value labels are passed through additional
353     /// arguments and by modifying the return type.
354     IA_Args,
355 
356     /// Argument and return value labels are passed through TLS variables
357     /// __dfsan_arg_tls and __dfsan_retval_tls.
358     IA_TLS
359   };
360 
361   /// How should calls to uninstrumented functions be handled?
362   enum WrapperKind {
363     /// This function is present in an uninstrumented form but we don't know
364     /// how it should be handled.  Print a warning and call the function anyway.
365     /// Don't label the return value.
366     WK_Warning,
367 
368     /// This function does not write to (user-accessible) memory, and its return
369     /// value is unlabelled.
370     WK_Discard,
371 
372     /// This function does not write to (user-accessible) memory, and the label
373     /// of its return value is the union of the label of its arguments.
374     WK_Functional,
375 
376     /// Instead of calling the function, a custom wrapper __dfsw_F is called,
377     /// where F is the name of the function.  This function may wrap the
378     /// original function or provide its own implementation.  This is similar to
379     /// the IA_Args ABI, except that IA_Args uses a struct return type to
380     /// pass the return value shadow in a register, while WK_Custom uses an
381     /// extra pointer argument to return the shadow.  This allows the wrapped
382     /// form of the function type to be expressed in C.
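    /// For example, a custom wrapper for "int f(int x)" would roughly have the
    /// form "int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label)";
    /// the exact convention is described in the DataFlowSanitizer design
    /// document.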
383     WK_Custom
384   };
385 
386   Module *Mod;
387   LLVMContext *Ctx;
388   Type *Int8Ptr;
389   IntegerType *OriginTy;
390   PointerType *OriginPtrTy;
391   ConstantInt *OriginBase;
392   ConstantInt *ZeroOrigin;
393   /// The shadow type for all primitive types and vector types.
394   IntegerType *PrimitiveShadowTy;
395   PointerType *PrimitiveShadowPtrTy;
396   IntegerType *IntptrTy;
397   ConstantInt *ZeroPrimitiveShadow;
398   ConstantInt *ShadowPtrMask;
399   ConstantInt *ShadowPtrMul;
400   Constant *ArgTLS;
401   ArrayType *ArgOriginTLSTy;
402   Constant *ArgOriginTLS;
403   Constant *RetvalTLS;
404   Constant *RetvalOriginTLS;
405   Constant *ExternalShadowMask;
406   FunctionType *DFSanUnionFnTy;
407   FunctionType *DFSanUnionLoadFnTy;
408   FunctionType *DFSanLoadLabelAndOriginFnTy;
409   FunctionType *DFSanUnimplementedFnTy;
410   FunctionType *DFSanSetLabelFnTy;
411   FunctionType *DFSanNonzeroLabelFnTy;
412   FunctionType *DFSanVarargWrapperFnTy;
413   FunctionType *DFSanCmpCallbackFnTy;
414   FunctionType *DFSanLoadStoreCallbackFnTy;
415   FunctionType *DFSanMemTransferCallbackFnTy;
416   FunctionType *DFSanChainOriginFnTy;
417   FunctionType *DFSanMemOriginTransferFnTy;
418   FunctionType *DFSanMaybeStoreOriginFnTy;
419   FunctionCallee DFSanUnionFn;
420   FunctionCallee DFSanCheckedUnionFn;
421   FunctionCallee DFSanUnionLoadFn;
422   FunctionCallee DFSanUnionLoadFast16LabelsFn;
423   FunctionCallee DFSanLoadLabelAndOriginFn;
424   FunctionCallee DFSanUnimplementedFn;
425   FunctionCallee DFSanSetLabelFn;
426   FunctionCallee DFSanNonzeroLabelFn;
427   FunctionCallee DFSanVarargWrapperFn;
428   FunctionCallee DFSanLoadCallbackFn;
429   FunctionCallee DFSanStoreCallbackFn;
430   FunctionCallee DFSanMemTransferCallbackFn;
431   FunctionCallee DFSanCmpCallbackFn;
432   FunctionCallee DFSanChainOriginFn;
433   FunctionCallee DFSanMemOriginTransferFn;
434   FunctionCallee DFSanMaybeStoreOriginFn;
435   SmallPtrSet<Value *, 16> DFSanRuntimeFunctions;
436   MDNode *ColdCallWeights;
437   MDNode *OriginStoreWeights;
438   DFSanABIList ABIList;
439   DenseMap<Value *, Function *> UnwrappedFnMap;
440   AttrBuilder ReadOnlyNoneAttrs;
441   bool DFSanRuntimeShadowMask = false;
442 
443   Value *getShadowOffset(Value *Addr, IRBuilder<> &IRB);
444   Value *getShadowAddress(Value *Addr, Instruction *Pos);
445   std::pair<Value *, Value *>
446   getShadowOriginAddress(Value *Addr, Align InstAlignment, Instruction *Pos);
447   bool isInstrumented(const Function *F);
448   bool isInstrumented(const GlobalAlias *GA);
449   FunctionType *getArgsFunctionType(FunctionType *T);
450   FunctionType *getTrampolineFunctionType(FunctionType *T);
451   TransformedFunction getCustomFunctionType(FunctionType *T);
452   InstrumentedABI getInstrumentedABI();
453   WrapperKind getWrapperKind(Function *F);
454   void addGlobalNamePrefix(GlobalValue *GV);
455   Function *buildWrapperFunction(Function *F, StringRef NewFName,
456                                  GlobalValue::LinkageTypes NewFLink,
457                                  FunctionType *NewFT);
458   Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
459   void initializeCallbackFunctions(Module &M);
460   void initializeRuntimeFunctions(Module &M);
461   void injectMetadataGlobals(Module &M);
462 
463   bool init(Module &M);
464 
  /// Returns whether the pass tracks origins. Supported only in fast16 mode
  /// with the TLS ABI.
467   bool shouldTrackOrigins();
468 
469   /// Returns whether the pass tracks labels for struct fields and array
  /// indices. Supported only in fast16 mode with the TLS ABI.
471   bool shouldTrackFieldsAndIndices();
472 
473   /// Returns a zero constant with the shadow type of OrigTy.
474   ///
  /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2),...}
476   /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]
477   /// getZeroShadow(other type) = i16(0)
478   ///
479   /// Note that a zero shadow is always i16(0) when shouldTrackFieldsAndIndices
480   /// returns false.
481   Constant *getZeroShadow(Type *OrigTy);
482   /// Returns a zero constant with the shadow type of V's type.
483   Constant *getZeroShadow(Value *V);
484 
485   /// Checks if V is a zero shadow.
486   bool isZeroShadow(Value *V);
487 
488   /// Returns the shadow type of OrigTy.
489   ///
490   /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}
491   /// getShadowTy([n x T]) = [n x getShadowTy(T)]
492   /// getShadowTy(other type) = i16
493   ///
494   /// Note that a shadow type is always i16 when shouldTrackFieldsAndIndices
495   /// returns false.
496   Type *getShadowTy(Type *OrigTy);
  /// Returns the shadow type of V's type.
498   Type *getShadowTy(Value *V);
499 
500   const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;
501 
502 public:
503   DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);
504 
505   bool runImpl(Module &M);
506 };
507 
508 struct DFSanFunction {
509   DataFlowSanitizer &DFS;
510   Function *F;
511   DominatorTree DT;
512   DataFlowSanitizer::InstrumentedABI IA;
513   bool IsNativeABI;
514   AllocaInst *LabelReturnAlloca = nullptr;
515   AllocaInst *OriginReturnAlloca = nullptr;
516   DenseMap<Value *, Value *> ValShadowMap;
517   DenseMap<Value *, Value *> ValOriginMap;
518   DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
519   DenseMap<AllocaInst *, AllocaInst *> AllocaOriginMap;
520 
521   std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
522   DenseSet<Instruction *> SkipInsts;
523   std::vector<Value *> NonZeroChecks;
524   bool AvoidNewBlocks;
525 
526   struct CachedShadow {
527     BasicBlock *Block; // The block where Shadow is defined.
528     Value *Shadow;
529   };
  /// Maps a value to its latest shadow value in terms of the dominator tree.
531   DenseMap<std::pair<Value *, Value *>, CachedShadow> CachedShadows;
  /// Maps a value to the latest collapsed shadow value it was converted to, in
  /// terms of the dominator tree. When ClDebugNonzeroLabels is on, this cache is
  /// used in a post-processing step in which CFG blocks are split, so it does
  /// not cache a BasicBlock like CachedShadows; instead it relies on domination
  /// between values.
536   DenseMap<Value *, Value *> CachedCollapsedShadows;
537   DenseMap<Value *, std::set<Value *>> ShadowElements;
538 
539   DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
540       : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
541     DT.recalculate(*F);
542     // FIXME: Need to track down the register allocator issue which causes poor
543     // performance in pathological cases with large numbers of basic blocks.
544     AvoidNewBlocks = F->size() > 1000;
545   }
546 
547   /// Computes the shadow address for a given function argument.
548   ///
549   /// Shadow = ArgTLS+ArgOffset.
550   Value *getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB);
551 
552   /// Computes the shadow address for a return value.
553   Value *getRetvalTLS(Type *T, IRBuilder<> &IRB);
554 
555   /// Computes the origin address for a given function argument.
556   ///
557   /// Origin = ArgOriginTLS[ArgNo].
558   Value *getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB);
559 
560   /// Computes the origin address for a return value.
561   Value *getRetvalOriginTLS();
562 
563   Value *getOrigin(Value *V);
564   void setOrigin(Instruction *I, Value *Origin);
565   /// Generates IR to compute the origin of the last operand with a taint label.
566   Value *combineOperandOrigins(Instruction *Inst);
  /// Before the instruction Pos, generates IR to compute the last origin with a
  /// taint label. Labels and origins come from the vectors Shadows and Origins,
  /// respectively. The generated IR is roughly
  ///   Sn-1 != Zero ? On-1 : ... S2 != Zero ? O2 : S1 != Zero ? O1 : O0
  /// When Zero is nullptr, ZeroPrimitiveShadow is used; otherwise Zero may be a
  /// zero constant of another bit width.
573   Value *combineOrigins(const std::vector<Value *> &Shadows,
574                         const std::vector<Value *> &Origins, Instruction *Pos,
575                         ConstantInt *Zero = nullptr);
576 
577   Value *getShadow(Value *V);
578   void setShadow(Instruction *I, Value *Shadow);
579   /// Generates IR to compute the union of the two given shadows, inserting it
  /// before Pos. The combined value has primitive shadow type.
581   Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  /// Combines the shadow values of V1 and V2, then converts the combined
  /// primitive value into a shadow value of the original type T.
584   Value *combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
585                                    Instruction *Pos);
586   Value *combineOperandShadows(Instruction *Inst);
587   std::pair<Value *, Value *> loadShadowOrigin(Value *ShadowAddr, uint64_t Size,
588                                                Align InstAlignment,
589                                                Instruction *Pos);
590   void storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
591                                   Align InstAlignment, Value *PrimitiveShadow,
592                                   Value *Origin, Instruction *Pos);
593   /// Applies PrimitiveShadow to all primitive subtypes of T, returning
594   /// the expanded shadow value.
595   ///
596   /// EFP({T1,T2, ...}, PS) = {EFP(T1,PS),EFP(T2,PS),...}
597   /// EFP([n x T], PS) = [n x EFP(T,PS)]
598   /// EFP(other types, PS) = PS
599   Value *expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
600                                    Instruction *Pos);
601   /// Collapses Shadow into a single primitive shadow value, unioning all
602   /// primitive shadow values in the process. Returns the final primitive
603   /// shadow value.
604   ///
  /// CTP({V1,V2,...}) = UNION(CTP(V1),CTP(V2),...)
  /// CTP([V1,V2,...]) = UNION(CTP(V1),CTP(V2),...)
  /// CTP(other value V) = V
608   Value *collapseToPrimitiveShadow(Value *Shadow, Instruction *Pos);
609 
610   void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
611                                 Instruction *Pos);
612 
613   Align getShadowAlign(Align InstAlignment);
614 
615 private:
616   /// Collapses the shadow with aggregate type into a single primitive shadow
617   /// value.
618   template <class AggregateType>
619   Value *collapseAggregateShadow(AggregateType *AT, Value *Shadow,
620                                  IRBuilder<> &IRB);
621 
622   Value *collapseToPrimitiveShadow(Value *Shadow, IRBuilder<> &IRB);
623 
624   /// Returns the shadow value of an argument A.
625   Value *getShadowForTLSArgument(Argument *A);
626 
627   /// The fast path of loading shadow in legacy mode.
628   Value *loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
629                               Align ShadowAlign, Instruction *Pos);
630 
631   /// The fast path of loading shadow in fast-16-label mode.
632   std::pair<Value *, Value *>
633   loadFast16ShadowFast(Value *ShadowAddr, Value *OriginAddr, uint64_t Size,
634                        Align ShadowAlign, Align OriginAlign, Value *FirstOrigin,
635                        Instruction *Pos);
636 
637   Align getOriginAlign(Align InstAlignment);
638 
  /// Because 4 contiguous bytes share one 4-byte origin, the most accurate load
  /// is __dfsan_load_label_and_origin, which returns the union of all labels and
  /// the origin of the first taint label. However, it is an additional call that
  /// expands to many instructions. To keep common cases fast, this checks
  /// whether labels and origins can be loaded without using the callback
  /// function.
645   bool useCallbackLoadLabelAndOrigin(uint64_t Size, Align InstAlignment);
646 
  /// Returns an origin chain for the current stack, whose previous origin is V.
648   Value *updateOrigin(Value *V, IRBuilder<> &IRB);
649 
  /// If Intptr is 64 bits wide, returns Origin | (Origin << 32) as an Intptr;
  /// otherwise returns Origin.
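  /// For example, on a 64-bit target an Origin of 0x11223344 becomes
  /// 0x1122334411223344.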
652   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin);
653 
654   /// Stores Origin into the address range [StoreOriginAddr, StoreOriginAddr +
655   /// Size).
656   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *StoreOriginAddr,
657                    uint64_t StoreOriginSize, Align Alignment);
658 
659   /// Stores Origin in terms of its Shadow value.
660   /// * Do not write origins for zero shadows because we do not trace origins
661   ///   for untainted sinks.
662   /// * Use __dfsan_maybe_store_origin if there are too many origin store
663   ///   instrumentations.
664   void storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size, Value *Shadow,
665                    Value *Origin, Value *StoreOriginAddr, Align InstAlignment);
666 
667   /// Convert a scalar value to an i1 by comparing with 0.
668   Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &Name = "");
669 
670   bool shouldInstrumentWithCall();
671 
672   int NumOriginStores = 0;
673 };
674 
675 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
676 public:
677   DFSanFunction &DFSF;
678 
679   DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
680 
681   const DataLayout &getDataLayout() const {
682     return DFSF.F->getParent()->getDataLayout();
683   }
684 
685   // Combines shadow values and origins for all of I's operands.
686   void visitInstOperands(Instruction &I);
687 
688   void visitUnaryOperator(UnaryOperator &UO);
689   void visitBinaryOperator(BinaryOperator &BO);
690   void visitCastInst(CastInst &CI);
691   void visitCmpInst(CmpInst &CI);
692   void visitGetElementPtrInst(GetElementPtrInst &GEPI);
693   void visitLoadInst(LoadInst &LI);
694   void visitStoreInst(StoreInst &SI);
695   void visitAtomicRMWInst(AtomicRMWInst &I);
696   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
697   void visitReturnInst(ReturnInst &RI);
698   void visitCallBase(CallBase &CB);
699   void visitPHINode(PHINode &PN);
700   void visitExtractElementInst(ExtractElementInst &I);
701   void visitInsertElementInst(InsertElementInst &I);
702   void visitShuffleVectorInst(ShuffleVectorInst &I);
703   void visitExtractValueInst(ExtractValueInst &I);
704   void visitInsertValueInst(InsertValueInst &I);
705   void visitAllocaInst(AllocaInst &I);
706   void visitSelectInst(SelectInst &I);
707   void visitMemSetInst(MemSetInst &I);
708   void visitMemTransferInst(MemTransferInst &I);
709 
710 private:
711   void visitCASOrRMW(Align InstAlignment, Instruction &I);
712 
713   // Returns false when this is an invoke of a custom function.
714   bool visitWrappedCallBase(Function &F, CallBase &CB);
715 
716   // Combines origins for all of I's operands.
717   void visitInstOperandOrigins(Instruction &I);
718 
719   void addShadowArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
720                           IRBuilder<> &IRB);
721 
722   void addOriginArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
723                           IRBuilder<> &IRB);
724 };
725 
726 } // end anonymous namespace
727 
728 DataFlowSanitizer::DataFlowSanitizer(
729     const std::vector<std::string> &ABIListFiles) {
730   std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
731   llvm::append_range(AllABIListFiles, ClABIListFiles);
732   // FIXME: should we propagate vfs::FileSystem to this constructor?
733   ABIList.set(
734       SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem()));
735 }
736 
737 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
738   SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
739   ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
740   if (T->isVarArg())
741     ArgTypes.push_back(PrimitiveShadowPtrTy);
742   Type *RetType = T->getReturnType();
743   if (!RetType->isVoidTy())
744     RetType = StructType::get(RetType, PrimitiveShadowTy);
745   return FunctionType::get(RetType, ArgTypes, T->isVarArg());
746 }
747 
748 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
749   assert(!T->isVarArg());
750   SmallVector<Type *, 4> ArgTypes;
751   ArgTypes.push_back(T->getPointerTo());
752   ArgTypes.append(T->param_begin(), T->param_end());
753   ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
754   Type *RetType = T->getReturnType();
755   if (!RetType->isVoidTy())
756     ArgTypes.push_back(PrimitiveShadowPtrTy);
757 
758   if (shouldTrackOrigins()) {
759     ArgTypes.append(T->getNumParams(), OriginTy);
760     if (!RetType->isVoidTy())
761       ArgTypes.push_back(OriginPtrTy);
762   }
763 
764   return FunctionType::get(T->getReturnType(), ArgTypes, false);
765 }
766 
767 TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
768   SmallVector<Type *, 4> ArgTypes;
769 
770   // Some parameters of the custom function being constructed are
771   // parameters of T.  Record the mapping from parameters of T to
772   // parameters of the custom function, so that parameter attributes
773   // at call sites can be updated.
774   std::vector<unsigned> ArgumentIndexMapping;
775   for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
776     Type *ParamType = T->getParamType(I);
777     FunctionType *FT;
778     if (isa<PointerType>(ParamType) &&
779         (FT = dyn_cast<FunctionType>(ParamType->getPointerElementType()))) {
780       ArgumentIndexMapping.push_back(ArgTypes.size());
781       ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
782       ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
783     } else {
784       ArgumentIndexMapping.push_back(ArgTypes.size());
785       ArgTypes.push_back(ParamType);
786     }
787   }
788   for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
789     ArgTypes.push_back(PrimitiveShadowTy);
790   if (T->isVarArg())
791     ArgTypes.push_back(PrimitiveShadowPtrTy);
792   Type *RetType = T->getReturnType();
793   if (!RetType->isVoidTy())
794     ArgTypes.push_back(PrimitiveShadowPtrTy);
795 
796   if (shouldTrackOrigins()) {
797     for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
798       ArgTypes.push_back(OriginTy);
799     if (T->isVarArg())
800       ArgTypes.push_back(OriginPtrTy);
801     if (!RetType->isVoidTy())
802       ArgTypes.push_back(OriginPtrTy);
803   }
804 
805   return TransformedFunction(
806       T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
807       ArgumentIndexMapping);
808 }
809 
810 bool DataFlowSanitizer::isZeroShadow(Value *V) {
811   if (!shouldTrackFieldsAndIndices())
812     return ZeroPrimitiveShadow == V;
813 
814   Type *T = V->getType();
815   if (!isa<ArrayType>(T) && !isa<StructType>(T)) {
816     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
817       return CI->isZero();
818     return false;
819   }
820 
821   return isa<ConstantAggregateZero>(V);
822 }
823 
824 bool DataFlowSanitizer::shouldTrackOrigins() {
825   static const bool ShouldTrackOrigins =
826       ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
827       ClFast16Labels;
828   return ShouldTrackOrigins;
829 }
830 
831 bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
832   return getInstrumentedABI() == DataFlowSanitizer::IA_TLS && ClFast16Labels;
833 }
834 
835 Constant *DataFlowSanitizer::getZeroShadow(Type *OrigTy) {
836   if (!shouldTrackFieldsAndIndices())
837     return ZeroPrimitiveShadow;
838 
839   if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))
840     return ZeroPrimitiveShadow;
841   Type *ShadowTy = getShadowTy(OrigTy);
842   return ConstantAggregateZero::get(ShadowTy);
843 }
844 
845 Constant *DataFlowSanitizer::getZeroShadow(Value *V) {
846   return getZeroShadow(V->getType());
847 }
848 
849 static Value *expandFromPrimitiveShadowRecursive(
850     Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubShadowTy,
851     Value *PrimitiveShadow, IRBuilder<> &IRB) {
852   if (!isa<ArrayType>(SubShadowTy) && !isa<StructType>(SubShadowTy))
853     return IRB.CreateInsertValue(Shadow, PrimitiveShadow, Indices);
854 
855   if (ArrayType *AT = dyn_cast<ArrayType>(SubShadowTy)) {
856     for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {
857       Indices.push_back(Idx);
858       Shadow = expandFromPrimitiveShadowRecursive(
859           Shadow, Indices, AT->getElementType(), PrimitiveShadow, IRB);
860       Indices.pop_back();
861     }
862     return Shadow;
863   }
864 
865   if (StructType *ST = dyn_cast<StructType>(SubShadowTy)) {
866     for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {
867       Indices.push_back(Idx);
868       Shadow = expandFromPrimitiveShadowRecursive(
869           Shadow, Indices, ST->getElementType(Idx), PrimitiveShadow, IRB);
870       Indices.pop_back();
871     }
872     return Shadow;
873   }
874   llvm_unreachable("Unexpected shadow type");
875 }
876 
877 bool DFSanFunction::shouldInstrumentWithCall() {
878   return ClInstrumentWithCallThreshold >= 0 &&
879          NumOriginStores >= ClInstrumentWithCallThreshold;
880 }
881 
882 Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
883                                                 Instruction *Pos) {
884   Type *ShadowTy = DFS.getShadowTy(T);
885 
886   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
887     return PrimitiveShadow;
888 
889   if (DFS.isZeroShadow(PrimitiveShadow))
890     return DFS.getZeroShadow(ShadowTy);
891 
892   IRBuilder<> IRB(Pos);
893   SmallVector<unsigned, 4> Indices;
894   Value *Shadow = UndefValue::get(ShadowTy);
895   Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
896                                               PrimitiveShadow, IRB);
897 
  // Caches the primitive shadow value from which this shadow value was built.
899   CachedCollapsedShadows[Shadow] = PrimitiveShadow;
900   return Shadow;
901 }
902 
903 template <class AggregateType>
904 Value *DFSanFunction::collapseAggregateShadow(AggregateType *AT, Value *Shadow,
905                                               IRBuilder<> &IRB) {
906   if (!AT->getNumElements())
907     return DFS.ZeroPrimitiveShadow;
908 
909   Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
910   Value *Aggregator = collapseToPrimitiveShadow(FirstItem, IRB);
911 
912   for (unsigned Idx = 1; Idx < AT->getNumElements(); Idx++) {
913     Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
914     Value *ShadowInner = collapseToPrimitiveShadow(ShadowItem, IRB);
915     Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
916   }
917   return Aggregator;
918 }
919 
920 Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
921                                                 IRBuilder<> &IRB) {
922   Type *ShadowTy = Shadow->getType();
923   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
924     return Shadow;
925   if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy))
926     return collapseAggregateShadow<>(AT, Shadow, IRB);
927   if (StructType *ST = dyn_cast<StructType>(ShadowTy))
928     return collapseAggregateShadow<>(ST, Shadow, IRB);
929   llvm_unreachable("Unexpected shadow type");
930 }
931 
932 Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
933                                                 Instruction *Pos) {
934   Type *ShadowTy = Shadow->getType();
935   if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
936     return Shadow;
937 
938   assert(DFS.shouldTrackFieldsAndIndices());
939 
940   // Checks if the cached collapsed shadow value dominates Pos.
941   Value *&CS = CachedCollapsedShadows[Shadow];
942   if (CS && DT.dominates(CS, Pos))
943     return CS;
944 
945   IRBuilder<> IRB(Pos);
946   Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
947   // Caches the converted primitive shadow value.
948   CS = PrimitiveShadow;
949   return PrimitiveShadow;
950 }
951 
952 Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
953   if (!shouldTrackFieldsAndIndices())
954     return PrimitiveShadowTy;
955 
956   if (!OrigTy->isSized())
957     return PrimitiveShadowTy;
958   if (isa<IntegerType>(OrigTy))
959     return PrimitiveShadowTy;
960   if (isa<VectorType>(OrigTy))
961     return PrimitiveShadowTy;
962   if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy))
963     return ArrayType::get(getShadowTy(AT->getElementType()),
964                           AT->getNumElements());
965   if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
966     SmallVector<Type *, 4> Elements;
967     for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)
968       Elements.push_back(getShadowTy(ST->getElementType(I)));
969     return StructType::get(*Ctx, Elements);
970   }
971   return PrimitiveShadowTy;
972 }
973 
974 Type *DataFlowSanitizer::getShadowTy(Value *V) {
975   return getShadowTy(V->getType());
976 }
977 
978 bool DataFlowSanitizer::init(Module &M) {
979   Triple TargetTriple(M.getTargetTriple());
980   const DataLayout &DL = M.getDataLayout();
981 
982   Mod = &M;
983   Ctx = &M.getContext();
984   Int8Ptr = Type::getInt8PtrTy(*Ctx);
985   OriginTy = IntegerType::get(*Ctx, OriginWidthBits);
986   OriginPtrTy = PointerType::getUnqual(OriginTy);
987   PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
988   PrimitiveShadowPtrTy = PointerType::getUnqual(PrimitiveShadowTy);
989   IntptrTy = DL.getIntPtrType(*Ctx);
990   ZeroPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, 0);
991   ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
992   OriginBase = ConstantInt::get(IntptrTy, 0x200000000000LL);
993   ZeroOrigin = ConstantInt::getSigned(OriginTy, 0);
994 
995   switch (TargetTriple.getArch()) {
996   case Triple::x86_64:
997     ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
998     break;
999   case Triple::mips64:
1000   case Triple::mips64el:
1001     ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
1002     break;
1003   case Triple::aarch64:
1004   case Triple::aarch64_be:
1005     // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
1006     DFSanRuntimeShadowMask = true;
1007     break;
1008   default:
1009     report_fatal_error("unsupported triple");
1010   }
1011 
1012   Type *DFSanUnionArgs[2] = {PrimitiveShadowTy, PrimitiveShadowTy};
1013   DFSanUnionFnTy =
1014       FunctionType::get(PrimitiveShadowTy, DFSanUnionArgs, /*isVarArg=*/false);
1015   Type *DFSanUnionLoadArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
1016   DFSanUnionLoadFnTy = FunctionType::get(PrimitiveShadowTy, DFSanUnionLoadArgs,
1017                                          /*isVarArg=*/false);
1018   Type *DFSanLoadLabelAndOriginArgs[2] = {Int8Ptr, IntptrTy};
1019   DFSanLoadLabelAndOriginFnTy =
1020       FunctionType::get(IntegerType::get(*Ctx, 64), DFSanLoadLabelAndOriginArgs,
1021                         /*isVarArg=*/false);
1022   DFSanUnimplementedFnTy = FunctionType::get(
1023       Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
1024   Type *DFSanSetLabelArgs[4] = {PrimitiveShadowTy, OriginTy,
1025                                 Type::getInt8PtrTy(*Ctx), IntptrTy};
1026   DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
1027                                         DFSanSetLabelArgs, /*isVarArg=*/false);
1028   DFSanNonzeroLabelFnTy =
1029       FunctionType::get(Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
1030   DFSanVarargWrapperFnTy = FunctionType::get(
1031       Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
1032   DFSanCmpCallbackFnTy =
1033       FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
1034                         /*isVarArg=*/false);
1035   DFSanChainOriginFnTy =
1036       FunctionType::get(OriginTy, OriginTy, /*isVarArg=*/false);
1037   Type *DFSanMaybeStoreOriginArgs[4] = {IntegerType::get(*Ctx, ShadowWidthBits),
1038                                         Int8Ptr, IntptrTy, OriginTy};
1039   DFSanMaybeStoreOriginFnTy = FunctionType::get(
1040       Type::getVoidTy(*Ctx), DFSanMaybeStoreOriginArgs, /*isVarArg=*/false);
1041   Type *DFSanMemOriginTransferArgs[3] = {Int8Ptr, Int8Ptr, IntptrTy};
1042   DFSanMemOriginTransferFnTy = FunctionType::get(
1043       Type::getVoidTy(*Ctx), DFSanMemOriginTransferArgs, /*isVarArg=*/false);
1044   Type *DFSanLoadStoreCallbackArgs[2] = {PrimitiveShadowTy, Int8Ptr};
1045   DFSanLoadStoreCallbackFnTy =
1046       FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
1047                         /*isVarArg=*/false);
1048   Type *DFSanMemTransferCallbackArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
1049   DFSanMemTransferCallbackFnTy =
1050       FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
1051                         /*isVarArg=*/false);
1052 
1053   ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
1054   OriginStoreWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
1055   return true;
1056 }
1057 
1058 bool DataFlowSanitizer::isInstrumented(const Function *F) {
1059   return !ABIList.isIn(*F, "uninstrumented");
1060 }
1061 
1062 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
1063   return !ABIList.isIn(*GA, "uninstrumented");
1064 }
1065 
1066 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
1067   return ClArgsABI ? IA_Args : IA_TLS;
1068 }
1069 
1070 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
1071   if (ABIList.isIn(*F, "functional"))
1072     return WK_Functional;
1073   if (ABIList.isIn(*F, "discard"))
1074     return WK_Discard;
1075   if (ABIList.isIn(*F, "custom"))
1076     return WK_Custom;
1077 
1078   return WK_Warning;
1079 }
1080 
1081 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
1082   std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
1083   GV->setName(Prefix + GVName);
1084 
1085   // Try to change the name of the function in module inline asm.  We only do
1086   // this for specific asm directives, currently only ".symver", to try to avoid
1087   // corrupting asm which happens to contain the symbol name as a substring.
1088   // Note that the substitution for .symver assumes that the versioned symbol
1089   // also has an instrumented name.
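  // For example, ".symver f,f@@VER_1" becomes ".symver dfs$f,dfs$f@@VER_1".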
1090   std::string Asm = GV->getParent()->getModuleInlineAsm();
1091   std::string SearchStr = ".symver " + GVName + ",";
1092   size_t Pos = Asm.find(SearchStr);
1093   if (Pos != std::string::npos) {
1094     Asm.replace(Pos, SearchStr.size(),
1095                 ".symver " + Prefix + GVName + "," + Prefix);
1096     GV->getParent()->setModuleInlineAsm(Asm);
1097   }
1098 }
1099 
1100 Function *
1101 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
1102                                         GlobalValue::LinkageTypes NewFLink,
1103                                         FunctionType *NewFT) {
1104   FunctionType *FT = F->getFunctionType();
1105   Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
1106                                     NewFName, F->getParent());
1107   NewF->copyAttributesFrom(F);
1108   NewF->removeAttributes(
1109       AttributeList::ReturnIndex,
1110       AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
1111 
1112   BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
1113   if (F->isVarArg()) {
1114     NewF->removeAttributes(AttributeList::FunctionIndex,
1115                            AttrBuilder().addAttribute("split-stack"));
1116     CallInst::Create(DFSanVarargWrapperFn,
1117                      IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
1118                      BB);
1119     new UnreachableInst(*Ctx, BB);
1120   } else {
1121     auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
1122     std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());
1123 
1124     CallInst *CI = CallInst::Create(F, Args, "", BB);
1125     if (FT->getReturnType()->isVoidTy())
1126       ReturnInst::Create(*Ctx, BB);
1127     else
1128       ReturnInst::Create(*Ctx, CI, BB);
1129   }
1130 
1131   return NewF;
1132 }
1133 
1134 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
1135                                                           StringRef FName) {
1136   FunctionType *FTT = getTrampolineFunctionType(FT);
1137   FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
1138   Function *F = dyn_cast<Function>(C.getCallee());
1139   if (F && F->isDeclaration()) {
1140     F->setLinkage(GlobalValue::LinkOnceODRLinkage);
1141     BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
1142     std::vector<Value *> Args;
1143     Function::arg_iterator AI = F->arg_begin() + 1;
1144     for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
1145       Args.push_back(&*AI);
1146     CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
1147     Type *RetType = FT->getReturnType();
1148     ReturnInst *RI = RetType->isVoidTy() ? ReturnInst::Create(*Ctx, BB)
1149                                          : ReturnInst::Create(*Ctx, CI, BB);
1150 
1151     // F is called by a wrapped custom function with primitive shadows. So
1152     // its arguments and return value need conversion.
1153     DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
1154     Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;
1155     ++ValAI;
1156     for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {
1157       Value *Shadow =
1158           DFSF.expandFromPrimitiveShadow(ValAI->getType(), &*ShadowAI, CI);
1159       DFSF.ValShadowMap[&*ValAI] = Shadow;
1160     }
1161     Function::arg_iterator RetShadowAI = ShadowAI;
1162     const bool ShouldTrackOrigins = shouldTrackOrigins();
1163     if (ShouldTrackOrigins) {
1164       ValAI = F->arg_begin();
1165       ++ValAI;
1166       Function::arg_iterator OriginAI = ShadowAI;
1167       if (!RetType->isVoidTy())
1168         ++OriginAI;
1169       for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++OriginAI, --N) {
1170         DFSF.ValOriginMap[&*ValAI] = &*OriginAI;
1171       }
1172     }
1173     DFSanVisitor(DFSF).visitCallInst(*CI);
1174     if (!RetType->isVoidTy()) {
1175       Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(
1176           DFSF.getShadow(RI->getReturnValue()), RI);
1177       new StoreInst(PrimitiveShadow, &*RetShadowAI, RI);
1178       if (ShouldTrackOrigins) {
1179         Value *Origin = DFSF.getOrigin(RI->getReturnValue());
1180         new StoreInst(Origin, &*std::prev(F->arg_end()), RI);
1181       }
1182     }
1183   }
1184 
1185   return cast<Constant>(C.getCallee());
1186 }
1187 
1188 // Initialize DataFlowSanitizer runtime functions and declare them in the module
1189 void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
1190   {
1191     AttributeList AL;
1192     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1193                          Attribute::NoUnwind);
1194     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1195                          Attribute::ReadNone);
1196     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1197                          Attribute::ZExt);
1198     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1199     AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
1200     DFSanUnionFn =
1201         Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
1202   }
1203   {
1204     AttributeList AL;
1205     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1206                          Attribute::NoUnwind);
1207     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1208                          Attribute::ReadNone);
1209     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1210                          Attribute::ZExt);
1211     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1212     AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
1213     DFSanCheckedUnionFn =
1214         Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
1215   }
1216   {
1217     AttributeList AL;
1218     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1219                          Attribute::NoUnwind);
1220     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1221                          Attribute::ReadOnly);
1222     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1223                          Attribute::ZExt);
1224     DFSanUnionLoadFn =
1225         Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
1226   }
1227   {
1228     AttributeList AL;
1229     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1230                          Attribute::NoUnwind);
1231     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1232                          Attribute::ReadOnly);
1233     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1234                          Attribute::ZExt);
1235     DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction(
1236         "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
1237   }
1238   {
1239     AttributeList AL;
1240     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1241                          Attribute::NoUnwind);
1242     AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
1243                          Attribute::ReadOnly);
1244     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1245                          Attribute::ZExt);
1246     DFSanLoadLabelAndOriginFn = Mod->getOrInsertFunction(
1247         "__dfsan_load_label_and_origin", DFSanLoadLabelAndOriginFnTy, AL);
1248   }
1249   DFSanUnimplementedFn =
1250       Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
1251   {
1252     AttributeList AL;
1253     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1254     AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
1255     DFSanSetLabelFn =
1256         Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
1257   }
1258   DFSanNonzeroLabelFn =
1259       Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
1260   DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
1261                                                   DFSanVarargWrapperFnTy);
1262   {
1263     AttributeList AL;
1264     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1265     AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
1266                          Attribute::ZExt);
1267     DFSanChainOriginFn = Mod->getOrInsertFunction("__dfsan_chain_origin",
1268                                                   DFSanChainOriginFnTy, AL);
1269   }
1270   DFSanMemOriginTransferFn = Mod->getOrInsertFunction(
1271       "__dfsan_mem_origin_transfer", DFSanMemOriginTransferFnTy);
1272 
1273   {
1274     AttributeList AL;
1275     AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
1276     AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
1277     DFSanMaybeStoreOriginFn = Mod->getOrInsertFunction(
1278         "__dfsan_maybe_store_origin", DFSanMaybeStoreOriginFnTy, AL);
1279   }
1280 
1281   DFSanRuntimeFunctions.insert(DFSanUnionFn.getCallee()->stripPointerCasts());
1282   DFSanRuntimeFunctions.insert(
1283       DFSanCheckedUnionFn.getCallee()->stripPointerCasts());
1284   DFSanRuntimeFunctions.insert(
1285       DFSanUnionLoadFn.getCallee()->stripPointerCasts());
1286   DFSanRuntimeFunctions.insert(
1287       DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts());
1288   DFSanRuntimeFunctions.insert(
1289       DFSanLoadLabelAndOriginFn.getCallee()->stripPointerCasts());
1290   DFSanRuntimeFunctions.insert(
1291       DFSanUnimplementedFn.getCallee()->stripPointerCasts());
1292   DFSanRuntimeFunctions.insert(
1293       DFSanSetLabelFn.getCallee()->stripPointerCasts());
1294   DFSanRuntimeFunctions.insert(
1295       DFSanNonzeroLabelFn.getCallee()->stripPointerCasts());
1296   DFSanRuntimeFunctions.insert(
1297       DFSanVarargWrapperFn.getCallee()->stripPointerCasts());
1298   DFSanRuntimeFunctions.insert(
1299       DFSanLoadCallbackFn.getCallee()->stripPointerCasts());
1300   DFSanRuntimeFunctions.insert(
1301       DFSanStoreCallbackFn.getCallee()->stripPointerCasts());
1302   DFSanRuntimeFunctions.insert(
1303       DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts());
1304   DFSanRuntimeFunctions.insert(
1305       DFSanCmpCallbackFn.getCallee()->stripPointerCasts());
1306   DFSanRuntimeFunctions.insert(
1307       DFSanChainOriginFn.getCallee()->stripPointerCasts());
1308   DFSanRuntimeFunctions.insert(
1309       DFSanMemOriginTransferFn.getCallee()->stripPointerCasts());
1310   DFSanRuntimeFunctions.insert(
1311       DFSanMaybeStoreOriginFn.getCallee()->stripPointerCasts());
1312 }
1313 
1314 // Initializes event callback functions and declares them in the module.
1315 void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
1316   DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
1317                                                  DFSanLoadStoreCallbackFnTy);
1318   DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback",
1319                                                   DFSanLoadStoreCallbackFnTy);
1320   DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
1321       "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
1322   DFSanCmpCallbackFn =
1323       Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
1324 }
1325 
1326 void DataFlowSanitizer::injectMetadataGlobals(Module &M) {
1327   // These variables can be used:
1328   // - by the runtime (to discover what the shadow width was during
1329   //   compilation)
1330   // - in testing (to avoid hardcoding the shadow width and type but instead
1331   //   extract them by pattern matching)
1332   Type *IntTy = Type::getInt32Ty(*Ctx);
1333   (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bits", IntTy, [&] {
1334     return new GlobalVariable(
1335         M, IntTy, /*isConstant=*/true, GlobalValue::WeakODRLinkage,
1336         ConstantInt::get(IntTy, ShadowWidthBits), "__dfsan_shadow_width_bits");
1337   });
1338   (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bytes", IntTy, [&] {
1339     return new GlobalVariable(M, IntTy, /*isConstant=*/true,
1340                               GlobalValue::WeakODRLinkage,
1341                               ConstantInt::get(IntTy, ShadowWidthBytes),
1342                               "__dfsan_shadow_width_bytes");
1343   });
1344 }
1345 
1346 bool DataFlowSanitizer::runImpl(Module &M) {
1347   init(M);
1348 
1349   if (ABIList.isIn(M, "skip"))
1350     return false;
1351 
1352   const unsigned InitialGlobalSize = M.global_size();
1353   const unsigned InitialModuleSize = M.size();
1354 
1355   bool Changed = false;
1356 
1357   auto GetOrInsertGlobal = [this, &Changed](StringRef Name,
1358                                             Type *Ty) -> Constant * {
1359     Constant *C = Mod->getOrInsertGlobal(Name, Ty);
1360     if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {
1361       Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
1362       G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
1363     }
1364     return C;
1365   };
1366 
1367   // These globals must be kept in sync with the ones in dfsan.cpp.
1368   ArgTLS =
1369       GetOrInsertGlobal("__dfsan_arg_tls",
1370                         ArrayType::get(Type::getInt64Ty(*Ctx), ArgTLSSize / 8));
1371   RetvalTLS = GetOrInsertGlobal(
1372       "__dfsan_retval_tls",
1373       ArrayType::get(Type::getInt64Ty(*Ctx), RetvalTLSSize / 8));
1374   ArgOriginTLSTy = ArrayType::get(OriginTy, NumOfElementsInArgOrgTLS);
1375   ArgOriginTLS = GetOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy);
1376   RetvalOriginTLS = GetOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy);
1377 
1378   (void)Mod->getOrInsertGlobal("__dfsan_track_origins", OriginTy, [&] {
1379     Changed = true;
1380     return new GlobalVariable(
1381         M, OriginTy, true, GlobalValue::WeakODRLinkage,
1382         ConstantInt::getSigned(OriginTy, shouldTrackOrigins()),
1383         "__dfsan_track_origins");
1384   });
1385 
1386   injectMetadataGlobals(M);
1387 
1388   ExternalShadowMask =
1389       Mod->getOrInsertGlobal(DFSanExternShadowPtrMask, IntptrTy);
1390 
1391   initializeCallbackFunctions(M);
1392   initializeRuntimeFunctions(M);
1393 
1394   std::vector<Function *> FnsToInstrument;
1395   SmallPtrSet<Function *, 2> FnsWithNativeABI;
1396   for (Function &F : M)
1397     if (!F.isIntrinsic() && !DFSanRuntimeFunctions.contains(&F))
1398       FnsToInstrument.push_back(&F);
1399 
1400   // Give function aliases prefixes when necessary, and build wrappers where the
1401   // instrumentedness is inconsistent.
1402   for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end();
1403        AI != AE;) {
1404     GlobalAlias *GA = &*AI;
1405     ++AI;
1406     // Don't stop on weak.  We assume people aren't playing games with the
1407     // instrumentedness of overridden weak aliases.
1408     auto *F = dyn_cast<Function>(GA->getBaseObject());
1409     if (!F)
1410       continue;
1411 
1412     bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
1413     if (GAInst && FInst) {
1414       addGlobalNamePrefix(GA);
1415     } else if (GAInst != FInst) {
1416       // Non-instrumented alias of an instrumented function, or vice versa.
1417       // Replace the alias with a native-ABI wrapper of the aliasee.  The pass
1418       // below will take care of instrumenting it.
1419       Function *NewF =
1420           buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
1421       GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
1422       NewF->takeName(GA);
1423       GA->eraseFromParent();
1424       FnsToInstrument.push_back(NewF);
1425     }
1426   }
1427 
1428   ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
1429       .addAttribute(Attribute::ReadNone);
1430 
1431   // First, change the ABI of every function in the module.  ABI-listed
1432   // functions keep their original ABI and get a wrapper function.
1433   for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(),
1434                                          FE = FnsToInstrument.end();
1435        FI != FE; ++FI) {
1436     Function &F = **FI;
1437     FunctionType *FT = F.getFunctionType();
1438 
1439     bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
1440                               FT->getReturnType()->isVoidTy());
1441 
1442     if (isInstrumented(&F)) {
1443       // Instrumented functions get a 'dfs$' prefix.  This allows us to more
1444       // easily identify cases of mismatching ABIs.
1445       if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
1446         FunctionType *NewFT = getArgsFunctionType(FT);
1447         Function *NewF = Function::Create(NewFT, F.getLinkage(),
1448                                           F.getAddressSpace(), "", &M);
1449         NewF->copyAttributesFrom(&F);
1450         NewF->removeAttributes(
1451             AttributeList::ReturnIndex,
1452             AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
1453         for (Function::arg_iterator FArg = F.arg_begin(),
1454                                     NewFArg = NewF->arg_begin(),
1455                                     FArgEnd = F.arg_end();
1456              FArg != FArgEnd; ++FArg, ++NewFArg) {
1457           FArg->replaceAllUsesWith(&*NewFArg);
1458         }
1459         NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
1460 
1461         for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
1462              UI != UE;) {
1463           BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
1464           ++UI;
1465           if (BA) {
1466             BA->replaceAllUsesWith(
1467                 BlockAddress::get(NewF, BA->getBasicBlock()));
1468             delete BA;
1469           }
1470         }
1471         F.replaceAllUsesWith(
1472             ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
1473         NewF->takeName(&F);
1474         F.eraseFromParent();
1475         *FI = NewF;
1476         addGlobalNamePrefix(NewF);
1477       } else {
1478         addGlobalNamePrefix(&F);
1479       }
1480     } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
1481       // Build a wrapper function for F.  The wrapper simply calls F, and is
1482       // added to FnsToInstrument so that any instrumentation according to its
1483       // WrapperKind is done in the second pass below.
1484       FunctionType *NewFT =
1485           getInstrumentedABI() == IA_Args ? getArgsFunctionType(FT) : FT;
1486 
1487       // If the function being wrapped has local linkage, then preserve the
1488       // function's linkage in the wrapper function.
1489       GlobalValue::LinkageTypes WrapperLinkage =
1490           F.hasLocalLinkage() ? F.getLinkage()
1491                               : GlobalValue::LinkOnceODRLinkage;
1492 
1493       Function *NewF = buildWrapperFunction(
1494           &F,
1495           (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
1496               std::string(F.getName()),
1497           WrapperLinkage, NewFT);
1498       if (getInstrumentedABI() == IA_TLS)
1499         NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
1500 
1501       Value *WrappedFnCst =
1502           ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
1503       F.replaceAllUsesWith(WrappedFnCst);
1504 
1505       UnwrappedFnMap[WrappedFnCst] = &F;
1506       *FI = NewF;
1507 
1508       if (!F.isDeclaration()) {
1509         // This function is probably defining an interposition of an
1510         // uninstrumented function and hence needs to keep the original ABI.
1511         // But any functions it may call need to use the instrumented ABI, so
1512         // we instrument it in a mode which preserves the original ABI.
1513         FnsWithNativeABI.insert(&F);
1514 
1515         // This code needs to rebuild the iterators, as they may be invalidated
1516         // by the push_back, taking care that the new range does not include
1517         // any functions added by this code.
1518         size_t N = FI - FnsToInstrument.begin(),
1519                Count = FE - FnsToInstrument.begin();
1520         FnsToInstrument.push_back(&F);
1521         FI = FnsToInstrument.begin() + N;
1522         FE = FnsToInstrument.begin() + Count;
1523       }
1524       // Hopefully, nobody will try to indirectly call a vararg
1525       // function... yet.
1526     } else if (FT->isVarArg()) {
1527       UnwrappedFnMap[&F] = &F;
1528       *FI = nullptr;
1529     }
1530   }
1531 
1532   for (Function *F : FnsToInstrument) {
1533     if (!F || F->isDeclaration())
1534       continue;
1535 
1536     removeUnreachableBlocks(*F);
1537 
1538     DFSanFunction DFSF(*this, F, FnsWithNativeABI.count(F));
1539 
1540     // DFSanVisitor may create new basic blocks, which confuses df_iterator.
1541     // Build a copy of the list before iterating over it.
1542     SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock()));
1543 
1544     for (BasicBlock *BB : BBList) {
1545       Instruction *Inst = &BB->front();
1546       while (true) {
1547         // DFSanVisitor may split the current basic block, changing the current
1548         // instruction's next pointer and moving the next instruction to the
1549         // tail block from which we should continue.
1550         Instruction *Next = Inst->getNextNode();
1551         // DFSanVisitor may delete Inst, so keep track of whether it was a
1552         // terminator.
1553         bool IsTerminator = Inst->isTerminator();
1554         if (!DFSF.SkipInsts.count(Inst))
1555           DFSanVisitor(DFSF).visit(Inst);
1556         if (IsTerminator)
1557           break;
1558         Inst = Next;
1559       }
1560     }
1561 
1562     // We will not necessarily be able to compute the shadow for every phi node
1563     // until we have visited every block.  Therefore, the code that handles phi
1564     // nodes adds them to the PHIFixups list so that they can be properly
1565     // handled here.
1566     for (auto PHIFixup : DFSF.PHIFixups) {
1567       PHINode *PN, *ShadowPN;
1568       std::tie(PN, ShadowPN) = PHIFixup;
1569       for (unsigned Val = 0, N = PN->getNumIncomingValues(); Val < N; ++Val) {
1570         ShadowPN->setIncomingValue(Val,
1571                                    DFSF.getShadow(PN->getIncomingValue(Val)));
1572       }
1573     }
1574 
1575     // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
1576     // places (i.e. instructions in basic blocks we haven't even begun visiting
1577     // yet).  To make our life easier, do this work in a pass after the main
1578     // instrumentation.
1579     if (ClDebugNonzeroLabels) {
1580       for (Value *V : DFSF.NonZeroChecks) {
1581         Instruction *Pos;
1582         if (Instruction *I = dyn_cast<Instruction>(V))
1583           Pos = I->getNextNode();
1584         else
1585           Pos = &DFSF.F->getEntryBlock().front();
1586         while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
1587           Pos = Pos->getNextNode();
1588         IRBuilder<> IRB(Pos);
1589         Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
1590         Value *Ne =
1591             IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);
1592         BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1593             Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
1594         IRBuilder<> ThenIRB(BI);
1595         ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
1596       }
1597     }
1598   }
1599 
1600   return Changed || !FnsToInstrument.empty() ||
1601          M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
1602 }
1603 
1604 Value *DFSanFunction::getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB) {
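  // Computes a typed pointer into the __dfsan_arg_tls buffer: the TLS base
  // plus ArgOffset bytes, cast to a pointer to T's shadow type.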
1605   Value *Base = IRB.CreatePointerCast(DFS.ArgTLS, DFS.IntptrTy);
1606   if (ArgOffset)
1607     Base = IRB.CreateAdd(Base, ConstantInt::get(DFS.IntptrTy, ArgOffset));
1608   return IRB.CreateIntToPtr(Base, PointerType::get(DFS.getShadowTy(T), 0),
1609                             "_dfsarg");
1610 }
1611 
1612 Value *DFSanFunction::getRetvalTLS(Type *T, IRBuilder<> &IRB) {
1613   return IRB.CreatePointerCast(
1614       DFS.RetvalTLS, PointerType::get(DFS.getShadowTy(T), 0), "_dfsret");
1615 }
1616 
1617 Value *DFSanFunction::getRetvalOriginTLS() { return DFS.RetvalOriginTLS; }
1618 
1619 Value *DFSanFunction::getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB) {
1620   return IRB.CreateConstGEP2_64(DFS.ArgOriginTLSTy, DFS.ArgOriginTLS, 0, ArgNo,
1621                                 "_dfsarg_o");
1622 }
1623 
1624 Value *DFSanFunction::getOrigin(Value *V) {
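  // Returns the origin recorded for V. With the TLS ABI, argument origins are
  // loaded lazily from __dfsan_arg_origin_tls at function entry; values with
  // no recorded origin get the zero origin.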
1625   assert(DFS.shouldTrackOrigins());
1626   if (!isa<Argument>(V) && !isa<Instruction>(V))
1627     return DFS.ZeroOrigin;
1628   Value *&Origin = ValOriginMap[V];
1629   if (!Origin) {
1630     if (Argument *A = dyn_cast<Argument>(V)) {
1631       if (IsNativeABI)
1632         return DFS.ZeroOrigin;
1633       switch (IA) {
1634       case DataFlowSanitizer::IA_TLS: {
1635         if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
1636           Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
1637           IRBuilder<> IRB(ArgOriginTLSPos);
1638           Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
1639           Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr);
1640         } else {
1641           // Overflow
1642           Origin = DFS.ZeroOrigin;
1643         }
1644         break;
1645       }
1646       case DataFlowSanitizer::IA_Args: {
1647         Origin = DFS.ZeroOrigin;
1648         break;
1649       }
1650       }
1651     } else {
1652       Origin = DFS.ZeroOrigin;
1653     }
1654   }
1655   return Origin;
1656 }
1657 
1658 void DFSanFunction::setOrigin(Instruction *I, Value *Origin) {
1659   if (!DFS.shouldTrackOrigins())
1660     return;
1661   assert(!ValOriginMap.count(I));
1662   assert(Origin->getType() == DFS.OriginTy);
1663   ValOriginMap[I] = Origin;
1664 }
1665 
1666 Value *DFSanFunction::getShadowForTLSArgument(Argument *A) {
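  // Computes A's byte offset within __dfsan_arg_tls by summing the aligned
  // shadow sizes of the preceding arguments, then loads A's shadow from that
  // slot at function entry. Falls back to the zero shadow when the slot would
  // overflow ArgTLS.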
1667   unsigned ArgOffset = 0;
1668   const DataLayout &DL = F->getParent()->getDataLayout();
1669   for (auto &FArg : F->args()) {
1670     if (!FArg.getType()->isSized()) {
1671       if (A == &FArg)
1672         break;
1673       continue;
1674     }
1675 
1676     unsigned Size = DL.getTypeAllocSize(DFS.getShadowTy(&FArg));
1677     if (A != &FArg) {
1678       ArgOffset += alignTo(Size, ShadowTLSAlignment);
1679       if (ArgOffset > ArgTLSSize)
1680         break; // ArgTLS overflows, uses a zero shadow.
1681       continue;
1682     }
1683 
1684     if (ArgOffset + Size > ArgTLSSize)
1685       break; // ArgTLS overflows, uses a zero shadow.
1686 
1687     Instruction *ArgTLSPos = &*F->getEntryBlock().begin();
1688     IRBuilder<> IRB(ArgTLSPos);
1689     Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);
1690     return IRB.CreateAlignedLoad(DFS.getShadowTy(&FArg), ArgShadowPtr,
1691                                  ShadowTLSAlignment);
1692   }
1693 
1694   return DFS.getZeroShadow(A);
1695 }
1696 
1697 Value *DFSanFunction::getShadow(Value *V) {
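  // Returns the shadow recorded for V, computing argument shadows lazily: TLS
  // ABI arguments read their shadow from __dfsan_arg_tls, while args-ABI
  // arguments use the matching shadow parameter in the second half of the
  // argument list. Anything else without a recorded shadow gets the zero
  // shadow.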
1698   if (!isa<Argument>(V) && !isa<Instruction>(V))
1699     return DFS.getZeroShadow(V);
1700   Value *&Shadow = ValShadowMap[V];
1701   if (!Shadow) {
1702     if (Argument *A = dyn_cast<Argument>(V)) {
1703       if (IsNativeABI)
1704         return DFS.getZeroShadow(V);
1705       switch (IA) {
1706       case DataFlowSanitizer::IA_TLS: {
1707         Shadow = getShadowForTLSArgument(A);
1708         break;
1709       }
1710       case DataFlowSanitizer::IA_Args: {
1711         unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
1712         Function::arg_iterator Arg = F->arg_begin();
1713         std::advance(Arg, ArgIdx);
1714         Shadow = &*Arg;
1715         assert(Shadow->getType() == DFS.PrimitiveShadowTy);
1716         break;
1717       }
1718       }
1719       NonZeroChecks.push_back(Shadow);
1720     } else {
1721       Shadow = DFS.getZeroShadow(V);
1722     }
1723   }
1724   return Shadow;
1725 }
1726 
1727 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
1728   assert(!ValShadowMap.count(I));
1729   assert(DFS.shouldTrackFieldsAndIndices() ||
1730          Shadow->getType() == DFS.PrimitiveShadowTy);
1731   ValShadowMap[I] = Shadow;
1732 }
1733 
1734 Value *DataFlowSanitizer::getShadowOffset(Value *Addr, IRBuilder<> &IRB) {
1735   // Returns Addr & shadow_mask
1736   assert(Addr != RetvalTLS && "Reinstrumenting?");
1737   Value *ShadowPtrMaskValue;
1738   if (DFSanRuntimeShadowMask)
1739     ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
1740   else
1741     ShadowPtrMaskValue = ShadowPtrMask;
1742   return IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
1743                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy));
1744 }
1745 
1746 std::pair<Value *, Value *>
1747 DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
1748                                           Instruction *Pos) {
1749   // Returns ((Addr & shadow_mask) + origin_base) & ~3UL
1750   IRBuilder<> IRB(Pos);
1751   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1752   Value *ShadowPtr = IRB.CreateIntToPtr(
1753       IRB.CreateMul(ShadowOffset, ShadowPtrMul), PrimitiveShadowPtrTy);
1754   Value *OriginPtr = nullptr;
1755   if (shouldTrackOrigins()) {
1756     Value *OriginLong = IRB.CreateAdd(ShadowOffset, OriginBase);
1757     const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1758     // When alignment is >= 4, Addr must be aligned to 4, otherwise it is UB.
1759     // So Mask is unnecessary.
1760     if (Alignment < MinOriginAlignment) {
1761       uint64_t Mask = MinOriginAlignment.value() - 1;
1762       OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(IntptrTy, ~Mask));
1763     }
1764     OriginPtr = IRB.CreateIntToPtr(OriginLong, OriginPtrTy);
1765   }
1766   return {ShadowPtr, OriginPtr};
1767 }
1768 
1769 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
1770   // Returns (Addr & shadow_mask) x 2
1771   IRBuilder<> IRB(Pos);
1772   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1773   return IRB.CreateIntToPtr(IRB.CreateMul(ShadowOffset, ShadowPtrMul),
1774                             PrimitiveShadowPtrTy);
1775 }
1776 
1777 Value *DFSanFunction::combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
1778                                                 Instruction *Pos) {
1779   Value *PrimitiveValue = combineShadows(V1, V2, Pos);
1780   return expandFromPrimitiveShadow(T, PrimitiveValue, Pos);
1781 }
1782 
1783 // Generates IR to compute the union of the two given shadows, inserting it
1784 // before Pos. The combined value has primitive type.
1785 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
1786   if (DFS.isZeroShadow(V1))
1787     return collapseToPrimitiveShadow(V2, Pos);
1788   if (DFS.isZeroShadow(V2))
1789     return collapseToPrimitiveShadow(V1, Pos);
1790   if (V1 == V2)
1791     return collapseToPrimitiveShadow(V1, Pos);
1792 
1793   auto V1Elems = ShadowElements.find(V1);
1794   auto V2Elems = ShadowElements.find(V2);
1795   if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
1796     if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
1797                       V2Elems->second.begin(), V2Elems->second.end())) {
1798       return collapseToPrimitiveShadow(V1, Pos);
1799     }
1800     if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
1801                       V1Elems->second.begin(), V1Elems->second.end())) {
1802       return collapseToPrimitiveShadow(V2, Pos);
1803     }
1804   } else if (V1Elems != ShadowElements.end()) {
1805     if (V1Elems->second.count(V2))
1806       return collapseToPrimitiveShadow(V1, Pos);
1807   } else if (V2Elems != ShadowElements.end()) {
1808     if (V2Elems->second.count(V1))
1809       return collapseToPrimitiveShadow(V2, Pos);
1810   }
1811 
1812   auto Key = std::make_pair(V1, V2);
1813   if (V1 > V2)
1814     std::swap(Key.first, Key.second);
1815   CachedShadow &CCS = CachedShadows[Key];
1816   if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
1817     return CCS.Shadow;
1818 
1819   // Converts the input shadows to shadows with primitive types.
1820   Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
1821   Value *PV2 = collapseToPrimitiveShadow(V2, Pos);
1822 
1823   IRBuilder<> IRB(Pos);
1824   if (ClFast16Labels) {
1825     CCS.Block = Pos->getParent();
1826     CCS.Shadow = IRB.CreateOr(PV1, PV2);
1827   } else if (AvoidNewBlocks) {
1828     CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {PV1, PV2});
1829     Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1830     Call->addParamAttr(0, Attribute::ZExt);
1831     Call->addParamAttr(1, Attribute::ZExt);
1832 
1833     CCS.Block = Pos->getParent();
1834     CCS.Shadow = Call;
1835   } else {
1836     BasicBlock *Head = Pos->getParent();
1837     Value *Ne = IRB.CreateICmpNE(PV1, PV2);
1838     BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1839         Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
1840     IRBuilder<> ThenIRB(BI);
1841     CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {PV1, PV2});
1842     Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1843     Call->addParamAttr(0, Attribute::ZExt);
1844     Call->addParamAttr(1, Attribute::ZExt);
1845 
1846     BasicBlock *Tail = BI->getSuccessor(0);
1847     PHINode *Phi =
1848         PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front());
1849     Phi->addIncoming(Call, Call->getParent());
1850     Phi->addIncoming(PV1, Head);
1851 
1852     CCS.Block = Tail;
1853     CCS.Shadow = Phi;
1854   }
1855 
1856   std::set<Value *> UnionElems;
1857   if (V1Elems != ShadowElements.end()) {
1858     UnionElems = V1Elems->second;
1859   } else {
1860     UnionElems.insert(V1);
1861   }
1862   if (V2Elems != ShadowElements.end()) {
1863     UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1864   } else {
1865     UnionElems.insert(V2);
1866   }
1867   ShadowElements[CCS.Shadow] = std::move(UnionElems);
1868 
1869   return CCS.Shadow;
1870 }
1871 
1872 // A convenience function which folds the shadows of each of the operands
1873 // of the provided instruction Inst, inserting the IR before Inst.  Returns
1874 // the computed union Value.
1875 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1876   if (Inst->getNumOperands() == 0)
1877     return DFS.getZeroShadow(Inst);
1878 
1879   Value *Shadow = getShadow(Inst->getOperand(0));
1880   for (unsigned I = 1, N = Inst->getNumOperands(); I < N; ++I)
1881     Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(I)), Inst);
1882 
1883   return expandFromPrimitiveShadow(Inst->getType(), Shadow, Inst);
1884 }
1885 
1886 void DFSanVisitor::visitInstOperands(Instruction &I) {
1887   Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1888   DFSF.setShadow(&I, CombinedShadow);
1889   visitInstOperandOrigins(I);
1890 }
1891 
1892 Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
1893                                      const std::vector<Value *> &Origins,
1894                                      Instruction *Pos, ConstantInt *Zero) {
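  // Selects one origin out of Origins: for each operand whose shadow compares
  // non-equal to Zero (defaulting to the zero primitive shadow), its origin is
  // preferred, with later operands taking precedence; constant-zero origins
  // are skipped.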
1895   assert(Shadows.size() == Origins.size());
1896   size_t Size = Origins.size();
1897   if (Size == 0)
1898     return DFS.ZeroOrigin;
1899   Value *Origin = nullptr;
1900   if (!Zero)
1901     Zero = DFS.ZeroPrimitiveShadow;
1902   for (size_t I = 0; I != Size; ++I) {
1903     Value *OpOrigin = Origins[I];
1904     Constant *ConstOpOrigin = dyn_cast<Constant>(OpOrigin);
1905     if (ConstOpOrigin && ConstOpOrigin->isNullValue())
1906       continue;
1907     if (!Origin) {
1908       Origin = OpOrigin;
1909       continue;
1910     }
1911     Value *OpShadow = Shadows[I];
1912     Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
1913     IRBuilder<> IRB(Pos);
1914     Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
1915     Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1916   }
1917   return Origin ? Origin : DFS.ZeroOrigin;
1918 }
1919 
1920 Value *DFSanFunction::combineOperandOrigins(Instruction *Inst) {
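  // Folds the origins of all of Inst's operands into a single origin, keyed by
  // their shadows.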
1921   size_t Size = Inst->getNumOperands();
1922   std::vector<Value *> Shadows(Size);
1923   std::vector<Value *> Origins(Size);
1924   for (unsigned I = 0; I != Size; ++I) {
1925     Shadows[I] = getShadow(Inst->getOperand(I));
1926     Origins[I] = getOrigin(Inst->getOperand(I));
1927   }
1928   return combineOrigins(Shadows, Origins, Inst);
1929 }
1930 
1931 void DFSanVisitor::visitInstOperandOrigins(Instruction &I) {
1932   if (!DFSF.DFS.shouldTrackOrigins())
1933     return;
1934   Value *CombinedOrigin = DFSF.combineOperandOrigins(&I);
1935   DFSF.setOrigin(&I, CombinedOrigin);
1936 }
1937 
1938 Align DFSanFunction::getShadowAlign(Align InstAlignment) {
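  // Shadow is ShadowWidthBytes per application byte, so the shadow alignment
  // scales by the same factor (the application alignment is treated as 1
  // unless -dfsan-preserve-alignment is set).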
1939   const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);
1940   return Align(Alignment.value() * DFS.ShadowWidthBytes);
1941 }
1942 
1943 Align DFSanFunction::getOriginAlign(Align InstAlignment) {
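  // Origins are stored as 4-byte values, so use at least MinOriginAlignment.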
1944   const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1945   return Align(std::max(MinOriginAlignment, Alignment));
1946 }
1947 
1948 bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
1949                                                   Align InstAlignment) {
1950   assert(Size != 0);
1951   // * if Size == 1, it is sufficient to load its origin aligned at 4.
1952   // * if Size == 2, we assume that in most cases Addr % 2 == 0, so it is
1953   //   sufficient to load its origin aligned at 4. If not, origins may be
1954   //   lost, but that should not happen very often.
1955   // * if align >= 4, Addr must be aligned to 4, otherwise it is UB. When
1956   //   Size % 4 == 0, it is more efficient to load origins without callbacks.
1957   // * Otherwise we use __dfsan_load_label_and_origin.
1958   // This should ensure that common cases run efficiently.
1959   if (Size <= 2)
1960     return false;
1961 
1962   const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1963   if (Alignment >= MinOriginAlignment &&
1964       Size % (64 / DFS.ShadowWidthBits) == 0)
1965     return false;
1966 
1967   return true;
1968 }
1969 
1970 std::pair<Value *, Value *> DFSanFunction::loadFast16ShadowFast(
1971     Value *ShadowAddr, Value *OriginAddr, uint64_t Size, Align ShadowAlign,
1972     Align OriginAlign, Value *FirstOrigin, Instruction *Pos) {
1973   // First OR all the WideShadows, then OR individual shadows within the
1974   // combined WideShadow. This is fewer instructions than ORing shadows
1975   // individually.
1976   const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();
1977   std::vector<Value *> Shadows;
1978   std::vector<Value *> Origins;
1979   IRBuilder<> IRB(Pos);
1980   Value *WideAddr =
1981       IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1982   Value *CombinedWideShadow =
1983       IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
1984   if (ShouldTrackOrigins) {
1985     Shadows.push_back(CombinedWideShadow);
1986     Origins.push_back(FirstOrigin);
1987   }
1988   for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
1989        Ofs += 64 / DFS.ShadowWidthBits) {
1990     WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1991                              ConstantInt::get(DFS.IntptrTy, 1));
1992     Value *NextWideShadow =
1993         IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
1994     CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
1995     if (ShouldTrackOrigins) {
1996       Shadows.push_back(NextWideShadow);
1997       OriginAddr = IRB.CreateGEP(DFS.OriginTy, OriginAddr,
1998                                  ConstantInt::get(DFS.IntptrTy, 1));
1999       Origins.push_back(
2000           IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign));
2001     }
2002   }
2003   for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
2004     Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
2005     CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
2006   }
2007   return {IRB.CreateTrunc(CombinedWideShadow, DFS.PrimitiveShadowTy),
2008           ShouldTrackOrigins
2009               ? combineOrigins(Shadows, Origins, Pos,
2010                                ConstantInt::getSigned(IRB.getInt64Ty(), 0))
2011               : DFS.ZeroOrigin};
2012 }
2013 
2014 Value *DFSanFunction::loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
2015                                            Align ShadowAlign,
2016                                            Instruction *Pos) {
2017   // Fast path for the common case where each byte has identical shadow: load
2018   // shadow 64 bits at a time, falling back to a __dfsan_union_load call if
2019   // any shadows differ.
2020   BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
2021   IRBuilder<> FallbackIRB(FallbackBB);
2022   CallInst *FallbackCall = FallbackIRB.CreateCall(
2023       DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
2024   FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
2025 
2026   // Compare each of the shadows stored in the loaded 64 bits to each other,
2027   // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
2028   IRBuilder<> IRB(Pos);
2029   Value *WideAddr =
2030       IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
2031   Value *WideShadow =
2032       IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
2033   Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.PrimitiveShadowTy);
2034   Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
2035   Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
2036   Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
2037   Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
2038 
2039   BasicBlock *Head = Pos->getParent();
2040   BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
2041 
2042   if (DomTreeNode *OldNode = DT.getNode(Head)) {
2043     std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
2044 
2045     DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
2046     for (auto *Child : Children)
2047       DT.changeImmediateDominator(Child, NewNode);
2048   }
2049 
2050   // In the following code LastBr will refer to the previous basic block's
2051   // conditional branch instruction, whose true successor is fixed up to point
2052   // to the next block during the loop below or to the tail after the final
2053   // iteration.
2054   BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
2055   ReplaceInstWithInst(Head->getTerminator(), LastBr);
2056   DT.addNewBlock(FallbackBB, Head);
2057 
2058   for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
2059        Ofs += 64 / DFS.ShadowWidthBits) {
2060     BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
2061     DT.addNewBlock(NextBB, LastBr->getParent());
2062     IRBuilder<> NextIRB(NextBB);
2063     WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
2064                                  ConstantInt::get(DFS.IntptrTy, 1));
2065     Value *NextWideShadow =
2066         NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(), WideAddr, ShadowAlign);
2067     ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
2068     LastBr->setSuccessor(0, NextBB);
2069     LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
2070   }
2071 
2072   LastBr->setSuccessor(0, Tail);
2073   FallbackIRB.CreateBr(Tail);
2074   PHINode *Shadow =
2075       PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front());
2076   Shadow->addIncoming(FallbackCall, FallbackBB);
2077   Shadow->addIncoming(TruncShadow, LastBr->getParent());
2078   return Shadow;
2079 }
2080 
2081 // Generates IR to load the shadow corresponding to bytes [Addr, Addr+Size),
2082 // where Addr has alignment InstAlignment, and takes the union of those
2083 // shadows. The returned shadow always has primitive type.
2084 std::pair<Value *, Value *> DFSanFunction::loadShadowOrigin(Value *Addr,
2085                                                             uint64_t Size,
2086                                                             Align InstAlignment,
2087                                                             Instruction *Pos) {
2088   const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();
2089 
2090   // Non-escaped loads.
2091   if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
2092     const auto SI = AllocaShadowMap.find(AI);
2093     if (SI != AllocaShadowMap.end()) {
2094       IRBuilder<> IRB(Pos);
2095       Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
2096       const auto OI = AllocaOriginMap.find(AI);
2097       assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());
2098       return {ShadowLI, ShouldTrackOrigins
2099                             ? IRB.CreateLoad(DFS.OriginTy, OI->second)
2100                             : nullptr};
2101     }
2102   }
2103 
2104   // Load from constant addresses.
2105   SmallVector<const Value *, 2> Objs;
2106   getUnderlyingObjects(Addr, Objs);
2107   bool AllConstants = true;
2108   for (const Value *Obj : Objs) {
2109     if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
2110       continue;
2111     if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
2112       continue;
2113 
2114     AllConstants = false;
2115     break;
2116   }
2117   if (AllConstants)
2118     return {DFS.ZeroPrimitiveShadow,
2119             ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};
2120 
2121   if (Size == 0)
2122     return {DFS.ZeroPrimitiveShadow,
2123             ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};
2124 
2125   // Use callback to load if this is not an optimizable case for origin
2126   // tracking.
2127   if (ShouldTrackOrigins &&
2128       useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
2129     IRBuilder<> IRB(Pos);
2130     CallInst *Call =
2131         IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
2132                        {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
2133                         ConstantInt::get(DFS.IntptrTy, Size)});
2134     Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
2135     return {IRB.CreateTrunc(IRB.CreateLShr(Call, DFS.OriginWidthBits),
2136                             DFS.PrimitiveShadowTy),
2137             IRB.CreateTrunc(Call, DFS.OriginTy)};
2138   }
2139 
2140   // Other cases that support loading shadows or origins in a fast way.
2141   Value *ShadowAddr, *OriginAddr;
2142   std::tie(ShadowAddr, OriginAddr) =
2143       DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
2144 
2145   const Align ShadowAlign = getShadowAlign(InstAlignment);
2146   const Align OriginAlign = getOriginAlign(InstAlignment);
2147   Value *Origin = nullptr;
2148   if (ShouldTrackOrigins) {
2149     IRBuilder<> IRB(Pos);
2150     Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
2151   }
2152 
2153   switch (Size) {
2154   case 1: {
2155     LoadInst *LI = new LoadInst(DFS.PrimitiveShadowTy, ShadowAddr, "", Pos);
2156     LI->setAlignment(ShadowAlign);
2157     return {LI, Origin};
2158   }
2159   case 2: {
2160     IRBuilder<> IRB(Pos);
2161     Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
2162                                        ConstantInt::get(DFS.IntptrTy, 1));
2163     Value *Load =
2164         IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr, ShadowAlign);
2165     Value *Load1 =
2166         IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr1, ShadowAlign);
2167     return {combineShadows(Load, Load1, Pos), Origin};
2168   }
2169   }
2170 
2171   if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0)
2172     return loadFast16ShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
2173                                 OriginAlign, Origin, Pos);
2174 
2175   if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0)
2176     return {loadLegacyShadowFast(ShadowAddr, Size, ShadowAlign, Pos), Origin};
2177 
2178   IRBuilder<> IRB(Pos);
2179   FunctionCallee &UnionLoadFn =
2180       ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
2181   CallInst *FallbackCall = IRB.CreateCall(
2182       UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
2183   FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
2184   return {FallbackCall, Origin};
2185 }
2186 
2187 static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) {
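  // Returns AO strengthened to include acquire semantics: orderings weaker
  // than Acquire become Acquire, and Release becomes AcquireRelease.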
2188   switch (AO) {
2189   case AtomicOrdering::NotAtomic:
2190     return AtomicOrdering::NotAtomic;
2191   case AtomicOrdering::Unordered:
2192   case AtomicOrdering::Monotonic:
2193   case AtomicOrdering::Acquire:
2194     return AtomicOrdering::Acquire;
2195   case AtomicOrdering::Release:
2196   case AtomicOrdering::AcquireRelease:
2197     return AtomicOrdering::AcquireRelease;
2198   case AtomicOrdering::SequentiallyConsistent:
2199     return AtomicOrdering::SequentiallyConsistent;
2200   }
2201   llvm_unreachable("Unknown ordering");
2202 }
2203 
2204 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
2205   auto &DL = LI.getModule()->getDataLayout();
2206   uint64_t Size = DL.getTypeStoreSize(LI.getType());
2207   if (Size == 0) {
2208     DFSF.setShadow(&LI, DFSF.DFS.getZeroShadow(&LI));
2209     DFSF.setOrigin(&LI, DFSF.DFS.ZeroOrigin);
2210     return;
2211   }
2212 
2213   // When an application load is atomic, increase the atomic ordering between
2214   // atomic application loads and stores to ensure happens-before order; load
2215   // shadow data after application data; store zero shadow data before
2216   // application data. This ensures shadow loads return either labels of the
2217   // initial application data or zeros.
2218   if (LI.isAtomic())
2219     LI.setOrdering(addAcquireOrdering(LI.getOrdering()));
2220 
2221   Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI;
2222   std::vector<Value *> Shadows;
2223   std::vector<Value *> Origins;
2224   Value *PrimitiveShadow, *Origin;
2225   std::tie(PrimitiveShadow, Origin) =
2226       DFSF.loadShadowOrigin(LI.getPointerOperand(), Size, LI.getAlign(), Pos);
2227   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2228   if (ShouldTrackOrigins) {
2229     Shadows.push_back(PrimitiveShadow);
2230     Origins.push_back(Origin);
2231   }
2232   if (ClCombinePointerLabelsOnLoad) {
2233     Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
2234     PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos);
2235     if (ShouldTrackOrigins) {
2236       Shadows.push_back(PtrShadow);
2237       Origins.push_back(DFSF.getOrigin(LI.getPointerOperand()));
2238     }
2239   }
2240   if (!DFSF.DFS.isZeroShadow(PrimitiveShadow))
2241     DFSF.NonZeroChecks.push_back(PrimitiveShadow);
2242 
2243   Value *Shadow =
2244       DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos);
2245   DFSF.setShadow(&LI, Shadow);
2246 
2247   if (ShouldTrackOrigins) {
2248     DFSF.setOrigin(&LI, DFSF.combineOrigins(Shadows, Origins, Pos));
2249   }
2250 
2251   if (ClEventCallbacks) {
2252     IRBuilder<> IRB(Pos);
2253     Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
2254     IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
2255   }
2256 }
2257 
2258 Value *DFSanFunction::updateOrigin(Value *V, IRBuilder<> &IRB) {
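  // Records a new node in the origin chain by calling __dfsan_chain_origin;
  // returns V unchanged when origin tracking is disabled.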
2259   if (!DFS.shouldTrackOrigins())
2260     return V;
2261   return IRB.CreateCall(DFS.DFSanChainOriginFn, V);
2262 }
2263 
2264 Value *DFSanFunction::originToIntptr(IRBuilder<> &IRB, Value *Origin) {
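  // Widens a 4-byte origin to an intptr-sized value by replicating it into
  // both halves, so paintOrigin can store it a pointer-width at a time.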
2265   const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
2266   const DataLayout &DL = F->getParent()->getDataLayout();
2267   unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
2268   if (IntptrSize == OriginSize)
2269     return Origin;
2270   assert(IntptrSize == OriginSize * 2);
2271   Origin = IRB.CreateIntCast(Origin, DFS.IntptrTy, /* isSigned */ false);
2272   return IRB.CreateOr(Origin, IRB.CreateShl(Origin, OriginSize * 8));
2273 }
2274 
2275 void DFSanFunction::paintOrigin(IRBuilder<> &IRB, Value *Origin,
2276                                 Value *StoreOriginAddr,
2277                                 uint64_t StoreOriginSize, Align Alignment) {
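  // Fills StoreOriginSize bytes of origin memory at StoreOriginAddr with
  // Origin, using pointer-width stores of the replicated origin where
  // alignment allows and 4-byte stores for the rest.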
2278   const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
2279   const DataLayout &DL = F->getParent()->getDataLayout();
2280   const Align IntptrAlignment = DL.getABITypeAlign(DFS.IntptrTy);
2281   unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
2282   assert(IntptrAlignment >= MinOriginAlignment);
2283   assert(IntptrSize >= OriginSize);
2284 
2285   unsigned Ofs = 0;
2286   Align CurrentAlignment = Alignment;
2287   if (Alignment >= IntptrAlignment && IntptrSize > OriginSize) {
2288     Value *IntptrOrigin = originToIntptr(IRB, Origin);
2289     Value *IntptrStoreOriginPtr = IRB.CreatePointerCast(
2290         StoreOriginAddr, PointerType::get(DFS.IntptrTy, 0));
2291     for (unsigned I = 0; I < StoreOriginSize / IntptrSize; ++I) {
2292       Value *Ptr =
2293           I ? IRB.CreateConstGEP1_32(DFS.IntptrTy, IntptrStoreOriginPtr, I)
2294             : IntptrStoreOriginPtr;
2295       IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
2296       Ofs += IntptrSize / OriginSize;
2297       CurrentAlignment = IntptrAlignment;
2298     }
2299   }
2300 
2301   for (unsigned I = Ofs; I < (StoreOriginSize + OriginSize - 1) / OriginSize;
2302        ++I) {
2303     Value *GEP = I ? IRB.CreateConstGEP1_32(DFS.OriginTy, StoreOriginAddr, I)
2304                    : StoreOriginAddr;
2305     IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
2306     CurrentAlignment = MinOriginAlignment;
2307   }
2308 }
2309 
2310 Value *DFSanFunction::convertToBool(Value *V, IRBuilder<> &IRB,
2311                                     const Twine &Name) {
2312   Type *VTy = V->getType();
2313   assert(VTy->isIntegerTy());
2314   if (VTy->getIntegerBitWidth() == 1)
2315     // Just converting a bool to a bool, so do nothing.
2316     return V;
2317   return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), Name);
2318 }
2319 
2320 void DFSanFunction::storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size,
2321                                 Value *Shadow, Value *Origin,
2322                                 Value *StoreOriginAddr, Align InstAlignment) {
2323   // Do not write origins for zero shadows because we do not trace origins for
2324   // untainted sinks.
2325   const Align OriginAlignment = getOriginAlign(InstAlignment);
2326   Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
2327   IRBuilder<> IRB(Pos);
2328   if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
2329     if (!ConstantShadow->isZeroValue())
2330       paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
2331                   OriginAlignment);
2332     return;
2333   }
2334 
2335   if (shouldInstrumentWithCall()) {
2336     IRB.CreateCall(DFS.DFSanMaybeStoreOriginFn,
2337                    {CollapsedShadow,
2338                     IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
2339                     ConstantInt::get(DFS.IntptrTy, Size), Origin});
2340   } else {
2341     Value *Cmp = convertToBool(CollapsedShadow, IRB, "_dfscmp");
2342     Instruction *CheckTerm = SplitBlockAndInsertIfThen(
2343         Cmp, &*IRB.GetInsertPoint(), false, DFS.OriginStoreWeights, &DT);
2344     IRBuilder<> IRBNew(CheckTerm);
2345     paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), StoreOriginAddr, Size,
2346                 OriginAlignment);
2347     ++NumOriginStores;
2348   }
2349 }
2350 
2351 void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
2352                                              Align ShadowAlign,
2353                                              Instruction *Pos) {
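  // Clears the shadow for Size application bytes at Addr with a single wide
  // integer store of zero.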
2354   IRBuilder<> IRB(Pos);
2355   IntegerType *ShadowTy =
2356       IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
2357   Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
2358   Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
2359   Value *ExtShadowAddr =
2360       IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
2361   IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
2362   // Do not write origins for 0 shadows because we do not trace origins for
2363   // untainted sinks.
2364 }
2365 
2366 void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
2367                                                Align InstAlignment,
2368                                                Value *PrimitiveShadow,
2369                                                Value *Origin,
2370                                                Instruction *Pos) {
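  // Stores PrimitiveShadow (and, when tracking, Origin) for the Size
  // application bytes at Addr. Non-escaping allocas use their stack shadow
  // slots, zero shadows take the wide-store fast path, and everything else is
  // written to shadow memory, vectorized where possible.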
2371   const bool ShouldTrackOrigins = DFS.shouldTrackOrigins() && Origin;
2372 
2373   if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
2374     const auto SI = AllocaShadowMap.find(AI);
2375     if (SI != AllocaShadowMap.end()) {
2376       IRBuilder<> IRB(Pos);
2377       IRB.CreateStore(PrimitiveShadow, SI->second);
2378 
2379       // Do not write origins for 0 shadows because we do not trace origins for
2380       // untainted sinks.
2381       if (ShouldTrackOrigins && !DFS.isZeroShadow(PrimitiveShadow)) {
2382         const auto OI = AllocaOriginMap.find(AI);
2383         assert(OI != AllocaOriginMap.end() && Origin);
2384         IRB.CreateStore(Origin, OI->second);
2385       }
2386       return;
2387     }
2388   }
2389 
2390   const Align ShadowAlign = getShadowAlign(InstAlignment);
2391   if (DFS.isZeroShadow(PrimitiveShadow)) {
2392     storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
2393     return;
2394   }
2395 
2396   IRBuilder<> IRB(Pos);
2397   Value *ShadowAddr, *OriginAddr;
2398   std::tie(ShadowAddr, OriginAddr) =
2399       DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
2400 
2401   const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
2402   uint64_t Offset = 0;
2403   uint64_t LeftSize = Size;
2404   if (LeftSize >= ShadowVecSize) {
2405     auto *ShadowVecTy =
2406         FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize);
2407     Value *ShadowVec = UndefValue::get(ShadowVecTy);
2408     for (unsigned I = 0; I != ShadowVecSize; ++I) {
2409       ShadowVec = IRB.CreateInsertElement(
2410           ShadowVec, PrimitiveShadow,
2411           ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I));
2412     }
2413     Value *ShadowVecAddr =
2414         IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
2415     do {
2416       Value *CurShadowVecAddr =
2417           IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
2418       IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
2419       LeftSize -= ShadowVecSize;
2420       ++Offset;
2421     } while (LeftSize >= ShadowVecSize);
2422     Offset *= ShadowVecSize;
2423   }
2424   while (LeftSize > 0) {
2425     Value *CurShadowAddr =
2426         IRB.CreateConstGEP1_32(DFS.PrimitiveShadowTy, ShadowAddr, Offset);
2427     IRB.CreateAlignedStore(PrimitiveShadow, CurShadowAddr, ShadowAlign);
2428     --LeftSize;
2429     ++Offset;
2430   }
2431 
2432   if (ShouldTrackOrigins) {
2433     storeOrigin(Pos, Addr, Size, PrimitiveShadow, Origin, OriginAddr,
2434                 InstAlignment);
2435   }
2436 }
2437 
2438 static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {
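  // Returns AO strengthened to include release semantics: orderings weaker
  // than Release become Release, and Acquire becomes AcquireRelease.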
2439   switch (AO) {
2440   case AtomicOrdering::NotAtomic:
2441     return AtomicOrdering::NotAtomic;
2442   case AtomicOrdering::Unordered:
2443   case AtomicOrdering::Monotonic:
2444   case AtomicOrdering::Release:
2445     return AtomicOrdering::Release;
2446   case AtomicOrdering::Acquire:
2447   case AtomicOrdering::AcquireRelease:
2448     return AtomicOrdering::AcquireRelease;
2449   case AtomicOrdering::SequentiallyConsistent:
2450     return AtomicOrdering::SequentiallyConsistent;
2451   }
2452   llvm_unreachable("Unknown ordering");
2453 }
2454 
2455 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
2456   auto &DL = SI.getModule()->getDataLayout();
2457   Value *Val = SI.getValueOperand();
2458   uint64_t Size = DL.getTypeStoreSize(Val->getType());
2459   if (Size == 0)
2460     return;
2461 
2462   // When an application store is atomic, increase the atomic ordering between
2463   // atomic application loads and stores to ensure happens-before order; load
2464   // shadow data after application data; store zero shadow data before
2465   // application data. This ensures shadow loads return either labels of the
2466   // initial application data or zeros.
2467   if (SI.isAtomic())
2468     SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
2469 
2470   const bool ShouldTrackOrigins =
2471       DFSF.DFS.shouldTrackOrigins() && !SI.isAtomic();
2472   std::vector<Value *> Shadows;
2473   std::vector<Value *> Origins;
2474 
2475   Value *Shadow =
2476       SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
2477 
2478   if (ShouldTrackOrigins) {
2479     Shadows.push_back(Shadow);
2480     Origins.push_back(DFSF.getOrigin(Val));
2481   }
2482 
2483   Value *PrimitiveShadow;
2484   if (ClCombinePointerLabelsOnStore) {
2485     Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
2486     if (ShouldTrackOrigins) {
2487       Shadows.push_back(PtrShadow);
2488       Origins.push_back(DFSF.getOrigin(SI.getPointerOperand()));
2489     }
2490     PrimitiveShadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
2491   } else {
2492     PrimitiveShadow = DFSF.collapseToPrimitiveShadow(Shadow, &SI);
2493   }
2494   Value *Origin = nullptr;
2495   if (ShouldTrackOrigins) {
2496     Origin = DFSF.combineOrigins(Shadows, Origins, &SI);
2497   }
2498   DFSF.storePrimitiveShadowOrigin(SI.getPointerOperand(), Size, SI.getAlign(),
2499                                   PrimitiveShadow, Origin, &SI);
2500   if (ClEventCallbacks) {
2501     IRBuilder<> IRB(&SI);
2502     Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
2503     IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8});
2504   }
2505 }
2506 
2507 void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) {
2508   assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2509 
2510   Value *Val = I.getOperand(1);
2511   const auto &DL = I.getModule()->getDataLayout();
2512   uint64_t Size = DL.getTypeStoreSize(Val->getType());
2513   if (Size == 0)
2514     return;
2515 
2516   // Conservatively store zero shadow at the stored address and give the result
2517   // a zero shadow to prevent shadow data races.
2518   IRBuilder<> IRB(&I);
2519   Value *Addr = I.getOperand(0);
2520   const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment);
2521   DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, &I);
2522   DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I));
2523   DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2524 }
2525 
2526 void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
2527   visitCASOrRMW(I.getAlign(), I);
2528   // TODO: The ordering change follows MSan. It is possible not to change
2529   // ordering because we always set and use 0 shadows.
2530   I.setOrdering(addReleaseOrdering(I.getOrdering()));
2531 }
2532 
2533 void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2534   visitCASOrRMW(I.getAlign(), I);
2535   // TODO: The ordering change follows MSan. It is possible not to change
2536   // ordering because we always set and use 0 shadows.
2537   I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2538 }
2539 
2540 void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
2541   visitInstOperands(UO);
2542 }
2543 
2544 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
2545   visitInstOperands(BO);
2546 }
2547 
2548 void DFSanVisitor::visitCastInst(CastInst &CI) { visitInstOperands(CI); }
2549 
2550 void DFSanVisitor::visitCmpInst(CmpInst &CI) {
2551   visitInstOperands(CI);
2552   if (ClEventCallbacks) {
2553     IRBuilder<> IRB(&CI);
2554     Value *CombinedShadow = DFSF.getShadow(&CI);
2555     IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
2556   }
2557 }
2558 
2559 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
2560   visitInstOperands(GEPI);
2561 }
2562 
2563 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
2564   visitInstOperands(I);
2565 }
2566 
2567 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
2568   visitInstOperands(I);
2569 }
2570 
2571 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
2572   visitInstOperands(I);
2573 }
2574 
2575 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
2576   if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
2577     visitInstOperands(I);
2578     return;
2579   }
2580 
2581   IRBuilder<> IRB(&I);
2582   Value *Agg = I.getAggregateOperand();
2583   Value *AggShadow = DFSF.getShadow(Agg);
2584   Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2585   DFSF.setShadow(&I, ResShadow);
2586   visitInstOperandOrigins(I);
2587 }
2588 
2589 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
2590   if (!DFSF.DFS.shouldTrackFieldsAndIndices()) {
2591     visitInstOperands(I);
2592     return;
2593   }
2594 
2595   IRBuilder<> IRB(&I);
2596   Value *AggShadow = DFSF.getShadow(I.getAggregateOperand());
2597   Value *InsShadow = DFSF.getShadow(I.getInsertedValueOperand());
2598   Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2599   DFSF.setShadow(&I, Res);
2600   visitInstOperandOrigins(I);
2601 }
2602 
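// An alloca whose only direct users are loads and stores through its pointer
// gets a dedicated shadow alloca (and an origin alloca when origin tracking
// is enabled), recorded in AllocaShadowMap/AllocaOriginMap so those accesses
// can bypass shadow memory. The alloca's own result, the pointer, always has
// a zero shadow.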
2603 void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
2604   bool AllLoadsStores = true;
2605   for (User *U : I.users()) {
2606     if (isa<LoadInst>(U))
2607       continue;
2608 
2609     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
2610       if (SI->getPointerOperand() == &I)
2611         continue;
2612     }
2613 
2614     AllLoadsStores = false;
2615     break;
2616   }
2617   if (AllLoadsStores) {
2618     IRBuilder<> IRB(&I);
2619     DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.PrimitiveShadowTy);
2620     if (DFSF.DFS.shouldTrackOrigins()) {
2621       DFSF.AllocaOriginMap[&I] =
2622           IRB.CreateAlloca(DFSF.DFS.OriginTy, nullptr, "_dfsa");
2623     }
2624   }
2625   DFSF.setShadow(&I, DFSF.DFS.ZeroPrimitiveShadow);
2626   DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2627 }
2628 
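// Shadow propagation for select: a vector condition conservatively unions
// both arms' shadows; a scalar condition selects between them (reusing the
// shadow when both arms agree). With ClTrackSelectControlFlow the
// condition's shadow is unioned into the result as well. For example,
// assuming 16-bit primitive shadows, a scalar select is roughly shadowed as
//   %s = select i1 %cond, i16 %true_shadow, i16 %false_shadow
// before that optional union with the condition's shadow.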
2629 void DFSanVisitor::visitSelectInst(SelectInst &I) {
2630   Value *CondShadow = DFSF.getShadow(I.getCondition());
2631   Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
2632   Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
2633   Value *ShadowSel = nullptr;
2634   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2635   std::vector<Value *> Shadows;
2636   std::vector<Value *> Origins;
2637   Value *TrueOrigin =
2638       ShouldTrackOrigins ? DFSF.getOrigin(I.getTrueValue()) : nullptr;
2639   Value *FalseOrigin =
2640       ShouldTrackOrigins ? DFSF.getOrigin(I.getFalseValue()) : nullptr;
2641 
2642   if (isa<VectorType>(I.getCondition()->getType())) {
2643     ShadowSel = DFSF.combineShadowsThenConvert(I.getType(), TrueShadow,
2644                                                FalseShadow, &I);
2645     if (ShouldTrackOrigins) {
2646       Shadows.push_back(TrueShadow);
2647       Shadows.push_back(FalseShadow);
2648       Origins.push_back(TrueOrigin);
2649       Origins.push_back(FalseOrigin);
2650     }
2651   } else {
2652     if (TrueShadow == FalseShadow) {
2653       ShadowSel = TrueShadow;
2654       if (ShouldTrackOrigins) {
2655         Shadows.push_back(TrueShadow);
2656         Origins.push_back(TrueOrigin);
2657       }
2658     } else {
2659       ShadowSel =
2660           SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
2661       if (ShouldTrackOrigins) {
2662         Shadows.push_back(ShadowSel);
2663         Origins.push_back(SelectInst::Create(I.getCondition(), TrueOrigin,
2664                                              FalseOrigin, "", &I));
2665       }
2666     }
2667   }
2668   DFSF.setShadow(&I, ClTrackSelectControlFlow
2669                          ? DFSF.combineShadowsThenConvert(
2670                                I.getType(), CondShadow, ShadowSel, &I)
2671                          : ShadowSel);
2672   if (ShouldTrackOrigins) {
2673     if (ClTrackSelectControlFlow) {
2674       Shadows.push_back(CondShadow);
2675       Origins.push_back(DFSF.getOrigin(I.getCondition()));
2676     }
2677     DFSF.setOrigin(&I, DFSF.combineOrigins(Shadows, Origins, &I));
2678   }
2679 }
2680 
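// memset: the value operand's shadow (and origin, when tracked), the
// destination pointer and the length are forwarded to the set-label runtime
// callback (DFSanSetLabelFn), which labels the destination range.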
2681 void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
2682   IRBuilder<> IRB(&I);
2683   Value *ValShadow = DFSF.getShadow(I.getValue());
2684   Value *ValOrigin = DFSF.DFS.shouldTrackOrigins()
2685                          ? DFSF.getOrigin(I.getValue())
2686                          : DFSF.DFS.ZeroOrigin;
2687   IRB.CreateCall(
2688       DFSF.DFS.DFSanSetLabelFn,
2689       {ValShadow, ValOrigin,
2690        IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*DFSF.DFS.Ctx)),
2691        IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
2692 }
2693 
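// memcpy/memmove: the transfer is mirrored onto shadow memory by emitting
// the same intrinsic over the corresponding shadow addresses. The length is
// scaled by ShadowWidthBytes, so e.g. a copy of N bytes is accompanied by a
// shadow copy of N * ShadowWidthBytes bytes, with alignments adjusted to
// match.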
2694 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
2695   IRBuilder<> IRB(&I);
2696   Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
2697   Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
2698   Value *LenShadow =
2699       IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
2700                                                     DFSF.DFS.ShadowWidthBytes));
2701   Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
2702   Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
2703   SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
2704   auto *MTI = cast<MemTransferInst>(
2705       IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
2706                      {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
2707   if (ClPreserveAlignment) {
2708     MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
2709     MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
2710   } else {
2711     MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
2712     MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
2713   }
2714   if (ClEventCallbacks) {
2715     IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
2716                    {RawDestShadow, I.getLength()});
2717   }
2718 }
2719 
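// For instrumented (non-native ABI) functions with a return value: under the
// TLS ABI the return value's shadow (and origin) is stored into the
// return-value TLS slots before returning; under the args ABI the return
// value is rewritten into an aggregate of {value, shadow}.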
2720 void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
2721   if (!DFSF.IsNativeABI && RI.getReturnValue()) {
2722     switch (DFSF.IA) {
2723     case DataFlowSanitizer::IA_TLS: {
2724       Value *S = DFSF.getShadow(RI.getReturnValue());
2725       IRBuilder<> IRB(&RI);
2726       Type *RT = DFSF.F->getFunctionType()->getReturnType();
2727       unsigned Size =
2728           getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
2729       if (Size <= RetvalTLSSize) {
        // If the shadow does not fit in RetvalTLSSize, nothing is stored; at
        // the call site, oversized return shadows are treated as zero.
2732         IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB),
2733                                ShadowTLSAlignment);
2734       }
2735       if (DFSF.DFS.shouldTrackOrigins()) {
2736         Value *O = DFSF.getOrigin(RI.getReturnValue());
2737         IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
2738       }
2739       break;
2740     }
2741     case DataFlowSanitizer::IA_Args: {
2742       IRBuilder<> IRB(&RI);
2743       Type *RT = DFSF.F->getFunctionType()->getReturnType();
2744       Value *InsVal =
2745           IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
2746       Value *InsShadow =
2747           IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
2748       RI.setOperand(0, InsShadow);
2749       break;
2750     }
2751     }
2752   }
2753 }
2754 
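// Appends the shadow arguments for a call to a custom wrapper: one collapsed
// primitive shadow per fixed parameter, a pointer to an on-stack array of
// shadows for any variadic arguments, and, for non-void callees, a pointer
// to the label-return alloca the wrapper writes the return shadow into.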
2755 void DFSanVisitor::addShadowArguments(Function &F, CallBase &CB,
2756                                       std::vector<Value *> &Args,
2757                                       IRBuilder<> &IRB) {
2758   FunctionType *FT = F.getFunctionType();
2759 
2760   auto *I = CB.arg_begin();
2761 
2762   // Adds non-variable argument shadows.
2763   for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
2764     Args.push_back(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB));
2765 
2766   // Adds variable argument shadows.
2767   if (FT->isVarArg()) {
2768     auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
2769                                      CB.arg_size() - FT->getNumParams());
2770     auto *LabelVAAlloca =
2771         new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
2772                        "labelva", &DFSF.F->getEntryBlock().front());
2773 
2774     for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
2775       auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
2776       IRB.CreateStore(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB),
2777                       LabelVAPtr);
2778     }
2779 
2780     Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
2781   }
2782 
2783   // Adds the return value shadow.
2784   if (!FT->getReturnType()->isVoidTy()) {
2785     if (!DFSF.LabelReturnAlloca) {
2786       DFSF.LabelReturnAlloca = new AllocaInst(
2787           DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
2788           "labelreturn", &DFSF.F->getEntryBlock().front());
2789     }
2790     Args.push_back(DFSF.LabelReturnAlloca);
2791   }
2792 }
2793 
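// Appends the origin arguments for a call to a custom wrapper, mirroring
// addShadowArguments: one origin per fixed parameter, an on-stack array for
// variadic argument origins, and a pointer to the origin-return alloca for
// non-void callees.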
2794 void DFSanVisitor::addOriginArguments(Function &F, CallBase &CB,
2795                                       std::vector<Value *> &Args,
2796                                       IRBuilder<> &IRB) {
2797   FunctionType *FT = F.getFunctionType();
2798 
2799   auto *I = CB.arg_begin();
2800 
2801   // Add non-variable argument origins.
2802   for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
2803     Args.push_back(DFSF.getOrigin(*I));
2804 
2805   // Add variable argument origins.
2806   if (FT->isVarArg()) {
2807     auto *OriginVATy =
2808         ArrayType::get(DFSF.DFS.OriginTy, CB.arg_size() - FT->getNumParams());
2809     auto *OriginVAAlloca =
2810         new AllocaInst(OriginVATy, getDataLayout().getAllocaAddrSpace(),
2811                        "originva", &DFSF.F->getEntryBlock().front());
2812 
2813     for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
2814       auto *OriginVAPtr = IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, N);
2815       IRB.CreateStore(DFSF.getOrigin(*I), OriginVAPtr);
2816     }
2817 
2818     Args.push_back(IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, 0));
2819   }
2820 
2821   // Add the return value origin.
2822   if (!FT->getReturnType()->isVoidTy()) {
2823     if (!DFSF.OriginReturnAlloca) {
2824       DFSF.OriginReturnAlloca = new AllocaInst(
2825           DFSF.DFS.OriginTy, getDataLayout().getAllocaAddrSpace(),
2826           "originreturn", &DFSF.F->getEntryBlock().front());
2827     }
2828     Args.push_back(DFSF.OriginReturnAlloca);
2829   }
2830 }
2831 
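// Instruments a call to a function with a known wrapper kind. Depending on
// the kind, the call either reports itself as unimplemented (WK_Warning),
// produces a zero shadow (WK_Discard), combines its operand shadows into the
// result (WK_Functional), or is rewritten to call the __dfsw_/__dfso_ custom
// wrapper with additional shadow/origin arguments (WK_Custom). Returns true
// if the call was fully handled here.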
2832 bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
2833   IRBuilder<> IRB(&CB);
2834   switch (DFSF.DFS.getWrapperKind(&F)) {
2835   case DataFlowSanitizer::WK_Warning:
2836     CB.setCalledFunction(&F);
2837     IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
2838                    IRB.CreateGlobalStringPtr(F.getName()));
2839     DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
2840     DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
2841     return true;
2842   case DataFlowSanitizer::WK_Discard:
2843     CB.setCalledFunction(&F);
2844     DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
2845     DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
2846     return true;
2847   case DataFlowSanitizer::WK_Functional:
2848     CB.setCalledFunction(&F);
2849     visitInstOperands(CB);
2850     return true;
2851   case DataFlowSanitizer::WK_Custom:
    // Don't try to handle invokes of custom functions; it's too complicated.
    // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
    // wrapper.
2855     CallInst *CI = dyn_cast<CallInst>(&CB);
2856     if (!CI)
2857       return false;
2858 
2859     const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2860     FunctionType *FT = F.getFunctionType();
2861     TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
2862     std::string CustomFName = ShouldTrackOrigins ? "__dfso_" : "__dfsw_";
2863     CustomFName += F.getName();
2864     FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
2865         CustomFName, CustomFn.TransformedType);
2866     if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
2867       CustomFn->copyAttributesFrom(&F);
2868 
2869       // Custom functions returning non-void will write to the return label.
2870       if (!FT->getReturnType()->isVoidTy()) {
2871         CustomFn->removeAttributes(AttributeList::FunctionIndex,
2872                                    DFSF.DFS.ReadOnlyNoneAttrs);
2873       }
2874     }
2875 
2876     std::vector<Value *> Args;
2877 
2878     // Adds non-variable arguments.
2879     auto *I = CB.arg_begin();
2880     for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {
2881       Type *T = (*I)->getType();
2882       FunctionType *ParamFT;
2883       if (isa<PointerType>(T) &&
2884           (ParamFT = dyn_cast<FunctionType>(T->getPointerElementType()))) {
2885         std::string TName = "dfst";
2886         TName += utostr(FT->getNumParams() - N);
2887         TName += "$";
2888         TName += F.getName();
2889         Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
2890         Args.push_back(T);
2891         Args.push_back(
2892             IRB.CreateBitCast(*I, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
2893       } else {
2894         Args.push_back(*I);
2895       }
2896     }
2897 
2898     // Adds shadow arguments.
2899     const unsigned ShadowArgStart = Args.size();
2900     addShadowArguments(F, CB, Args, IRB);
2901 
2902     // Adds origin arguments.
2903     const unsigned OriginArgStart = Args.size();
2904     if (ShouldTrackOrigins)
2905       addOriginArguments(F, CB, Args, IRB);
2906 
2907     // Adds variable arguments.
2908     append_range(Args, drop_begin(CB.args(), FT->getNumParams()));
2909 
2910     CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
2911     CustomCI->setCallingConv(CI->getCallingConv());
2912     CustomCI->setAttributes(transformFunctionAttributes(
2913         CustomFn, CI->getContext(), CI->getAttributes()));
2914 
    // Update the parameter attributes of the custom call instruction to
    // zero-extend the shadow parameters. This is required for targets that
    // consider PrimitiveShadowTy an illegal type.
2918     for (unsigned N = 0; N < FT->getNumParams(); N++) {
2919       const unsigned ArgNo = ShadowArgStart + N;
2920       if (CustomCI->getArgOperand(ArgNo)->getType() ==
2921           DFSF.DFS.PrimitiveShadowTy)
2922         CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
2923       if (ShouldTrackOrigins) {
2924         const unsigned OriginArgNo = OriginArgStart + N;
2925         if (CustomCI->getArgOperand(OriginArgNo)->getType() ==
2926             DFSF.DFS.OriginTy)
2927           CustomCI->addParamAttr(OriginArgNo, Attribute::ZExt);
2928       }
2929     }
2930 
2931     // Loads the return value shadow and origin.
2932     if (!FT->getReturnType()->isVoidTy()) {
2933       LoadInst *LabelLoad =
2934           IRB.CreateLoad(DFSF.DFS.PrimitiveShadowTy, DFSF.LabelReturnAlloca);
2935       DFSF.setShadow(CustomCI, DFSF.expandFromPrimitiveShadow(
2936                                    FT->getReturnType(), LabelLoad, &CB));
2937       if (ShouldTrackOrigins) {
2938         LoadInst *OriginLoad =
2939             IRB.CreateLoad(DFSF.DFS.OriginTy, DFSF.OriginReturnAlloca);
2940         DFSF.setOrigin(CustomCI, OriginLoad);
2941       }
2942     }
2943 
2944     CI->replaceAllUsesWith(CustomCI);
2945     CI->eraseFromParent();
2946     return true;
2947   }
2948   return false;
2949 }
2950 
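// Generic call/invoke handling. Intrinsics and inline asm just combine
// operand shadows; callees found in UnwrappedFnMap are dispatched to
// visitWrappedCallBase. Otherwise argument shadows (and origins) are passed
// via TLS or, under the args ABI, through an extended argument list, and the
// return shadow is read back after the call.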
2951 void DFSanVisitor::visitCallBase(CallBase &CB) {
2952   Function *F = CB.getCalledFunction();
2953   if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
2954     visitInstOperands(CB);
2955     return;
2956   }
2957 
2958   // Calls to this function are synthesized in wrappers, and we shouldn't
2959   // instrument them.
2960   if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
2961     return;
2962 
2963   DenseMap<Value *, Function *>::iterator UnwrappedFnIt =
2964       DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
2965   if (UnwrappedFnIt != DFSF.DFS.UnwrappedFnMap.end())
2966     if (visitWrappedCallBase(*UnwrappedFnIt->second, CB))
2967       return;
2968 
2969   IRBuilder<> IRB(&CB);
2970 
2971   const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2972   FunctionType *FT = CB.getFunctionType();
2973   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
2974     // Stores argument shadows.
2975     unsigned ArgOffset = 0;
2976     const DataLayout &DL = getDataLayout();
2977     for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
2978       if (ShouldTrackOrigins) {
        // Only store origins for arguments that fit in the origin TLS and
        // carry a non-zero shadow.
2980         Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
2981         if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
2982             !DFSF.DFS.isZeroShadow(ArgShadow))
2983           IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
2984                           DFSF.getArgOriginTLS(I, IRB));
2985       }
2986 
2987       unsigned Size =
2988           DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
      // Stop storing argument shadows once their combined size exceeds
      // ArgTLSSize. Inside the callee, arguments past the overflow point get
      // zero shadows.
2991       if (ArgOffset + Size > ArgTLSSize)
2992         break;
2993       IRB.CreateAlignedStore(
2994           DFSF.getShadow(CB.getArgOperand(I)),
2995           DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
2996           ShadowTLSAlignment);
2997       ArgOffset += alignTo(Size, ShadowTLSAlignment);
2998     }
2999   }
3000 
3001   Instruction *Next = nullptr;
3002   if (!CB.getType()->isVoidTy()) {
3003     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3004       if (II->getNormalDest()->getSinglePredecessor()) {
3005         Next = &II->getNormalDest()->front();
3006       } else {
3007         BasicBlock *NewBB =
3008             SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
3009         Next = &NewBB->front();
3010       }
3011     } else {
3012       assert(CB.getIterator() != CB.getParent()->end());
3013       Next = CB.getNextNode();
3014     }
3015 
3016     if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
3017       // Loads the return value shadow.
3018       IRBuilder<> NextIRB(Next);
3019       const DataLayout &DL = getDataLayout();
3020       unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
3021       if (Size > RetvalTLSSize) {
3022         // Set overflowed return shadow to be zero.
3023         DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
3024       } else {
3025         LoadInst *LI = NextIRB.CreateAlignedLoad(
3026             DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
3027             ShadowTLSAlignment, "_dfsret");
3028         DFSF.SkipInsts.insert(LI);
3029         DFSF.setShadow(&CB, LI);
3030         DFSF.NonZeroChecks.push_back(LI);
3031       }
3032 
3033       if (ShouldTrackOrigins) {
3034         LoadInst *LI = NextIRB.CreateLoad(
3035             DFSF.DFS.OriginTy, DFSF.getRetvalOriginTLS(), "_dfsret_o");
3036         DFSF.SkipInsts.insert(LI);
3037         DFSF.setOrigin(&CB, LI);
3038       }
3039     }
3040   }
3041 
  // Do all IA_Args instrumentation down here so that any tampering with the
  // CFG happens after the SplitEdge call above, which could otherwise be
  // affected by it.
3044   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
3045     FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
3046     Value *Func =
3047         IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
3048 
3049     const unsigned NumParams = FT->getNumParams();
3050 
3051     // Copy original arguments.
3052     auto *ArgIt = CB.arg_begin(), *ArgEnd = CB.arg_end();
3053     std::vector<Value *> Args(NumParams);
3054     std::copy_n(ArgIt, NumParams, Args.begin());
3055 
3056     // Add shadow arguments by transforming original arguments.
3057     std::generate_n(std::back_inserter(Args), NumParams,
3058                     [&]() { return DFSF.getShadow(*ArgIt++); });
3059 
3060     if (FT->isVarArg()) {
3061       unsigned VarArgSize = CB.arg_size() - NumParams;
3062       ArrayType *VarArgArrayTy =
3063           ArrayType::get(DFSF.DFS.PrimitiveShadowTy, VarArgSize);
3064       AllocaInst *VarArgShadow =
3065           new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
3066                          "", &DFSF.F->getEntryBlock().front());
3067       Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
3068 
3069       // Copy remaining var args.
3070       unsigned GepIndex = 0;
3071       std::for_each(ArgIt, ArgEnd, [&](Value *Arg) {
3072         IRB.CreateStore(
3073             DFSF.getShadow(Arg),
3074             IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, GepIndex++));
3075         Args.push_back(Arg);
3076       });
3077     }
3078 
3079     CallBase *NewCB;
3080     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3081       NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
3082                                II->getUnwindDest(), Args);
3083     } else {
3084       NewCB = IRB.CreateCall(NewFT, Func, Args);
3085     }
3086     NewCB->setCallingConv(CB.getCallingConv());
3087     NewCB->setAttributes(CB.getAttributes().removeAttributes(
3088         *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
3089         AttributeFuncs::typeIncompatible(NewCB->getType())));
3090 
3091     if (Next) {
3092       ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
3093       DFSF.SkipInsts.insert(ExVal);
3094       ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
3095       DFSF.SkipInsts.insert(ExShadow);
3096       DFSF.setShadow(ExVal, ExShadow);
3097       DFSF.NonZeroChecks.push_back(ExShadow);
3098 
3099       CB.replaceAllUsesWith(ExVal);
3100     }
3101 
3102     CB.eraseFromParent();
3103   }
3104 }
3105 
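// The shadow of a phi is a phi over the incoming values' shadows. Those
// shadows may not exist yet, so undef placeholders are used here and patched
// up later via PHIFixups.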
3106 void DFSanVisitor::visitPHINode(PHINode &PN) {
3107   Type *ShadowTy = DFSF.DFS.getShadowTy(&PN);
3108   PHINode *ShadowPN =
3109       PHINode::Create(ShadowTy, PN.getNumIncomingValues(), "", &PN);
3110 
3111   // Give the shadow phi node valid predecessors to fool SplitEdge into working.
3112   Value *UndefShadow = UndefValue::get(ShadowTy);
3113   for (BasicBlock *BB : PN.blocks())
3114     ShadowPN->addIncoming(UndefShadow, BB);
3115 
3116   DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
3117   DFSF.setShadow(&PN, ShadowPN);
3118 }
3119 
3120 namespace {
3121 class DataFlowSanitizerLegacyPass : public ModulePass {
3122 private:
3123   std::vector<std::string> ABIListFiles;
3124 
3125 public:
3126   static char ID;
3127 
3128   DataFlowSanitizerLegacyPass(
3129       const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
3130       : ModulePass(ID), ABIListFiles(ABIListFiles) {}
3131 
3132   bool runOnModule(Module &M) override {
3133     return DataFlowSanitizer(ABIListFiles).runImpl(M);
3134   }
3135 };
3136 } // namespace
3137 
3138 char DataFlowSanitizerLegacyPass::ID;
3139 
3140 INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
3141                 "DataFlowSanitizer: dynamic data flow analysis.", false, false)
3142 
3143 ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
3144     const std::vector<std::string> &ABIListFiles) {
3145   return new DataFlowSanitizerLegacyPass(ABIListFiles);
3146 }
3147 
3148 PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
3149                                              ModuleAnalysisManager &AM) {
3150   if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
3151     return PreservedAnalyses::none();
3152   }
3153   return PreservedAnalyses::all();
3154 }
3155