//===- DataFlowSanitizer.cpp - dynamic data flow analysis ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
/// analysis.
///
/// Unlike other Sanitizer tools, this tool is not designed to detect a
/// specific class of bugs on its own. Instead, it provides a generic dynamic
/// data flow analysis framework to be used by clients to help detect
/// application-specific issues within their own code.
///
/// The analysis is based on automatic propagation of data flow labels (also
/// known as taint labels) through a program as it performs computation.
///
/// There are two possible memory layouts. In the first one, each byte of
/// application memory is backed by a shadow memory byte. The shadow byte can
/// represent up to 8 labels. To enable this you must specify the
/// -dfsan-fast-8-labels flag. On Linux/x86_64, memory is then laid out as
/// follows:
///
/// +--------------------+ 0x800000000000 (top of memory)
/// | application memory |
/// +--------------------+ 0x700000008000 (kAppAddr)
/// |                    |
/// |       unused       |
/// |                    |
/// +--------------------+ 0x300200000000 (kUnusedAddr)
/// |    union table     |
/// +--------------------+ 0x300000000000 (kUnionTableAddr)
/// |       origin       |
/// +--------------------+ 0x200000008000 (kOriginAddr)
/// |   shadow memory    |
/// +--------------------+ 0x100000008000 (kShadowAddr)
/// |       unused       |
/// +--------------------+ 0x000000010000
/// | reserved by kernel |
/// +--------------------+ 0x000000000000
///
///
/// In the second memory layout, each byte of application memory is backed by
/// two bytes of shadow memory which hold the label. That means we can
/// represent either 16 labels (with the -dfsan-fast-16-labels flag) or 2^16
/// labels (in the default legacy mode) per byte. On Linux/x86_64, memory is
/// then laid out as follows:
///
/// +--------------------+ 0x800000000000 (top of memory)
/// | application memory |
/// +--------------------+ 0x700000008000 (kAppAddr)
/// |                    |
/// |       unused       |
/// |                    |
/// +--------------------+ 0x300200000000 (kUnusedAddr)
/// |    union table     |
/// +--------------------+ 0x300000000000 (kUnionTableAddr)
/// |       origin       |
/// +--------------------+ 0x200000008000 (kOriginAddr)
/// |   shadow memory    |
/// +--------------------+ 0x000000010000 (kShadowAddr)
/// | reserved by kernel |
/// +--------------------+ 0x000000000000
///
///
/// To derive a shadow memory address from an application memory address,
/// bits 44-46 are cleared to bring the address into the range
/// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
/// account for the double byte representation of shadow labels and move the
/// address into the shadow memory range. See the function
/// DataFlowSanitizer::getShadowAddress below.
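///
/// As a worked example, using the x86_64 constants set up in
/// DataFlowSanitizer::init below (shadow mask ~0x700000000000 and two-byte
/// shadow labels), an application address maps to its shadow address as:
///
///   Shadow(0x7fffffffe000) = (0x7fffffffe000 & ~0x700000000000) * 2
///                          = 0x0fffffffe000 * 2
///                          = 0x1fffffffc000, which lies in shadow memory.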
///
/// For more information, please refer to the design document:
/// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

// This must be consistent with ShadowWidthBits.
static const Align ShadowTLSAlignment = Align(2);

static const Align MinOriginAlignment = Align(4);

// The size of TLS variables. These constants must be kept in sync with the
// ones in dfsan.cpp.
static const unsigned ArgTLSSize = 800;
static const unsigned RetvalTLSSize = 800;

// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer
// the runtime will set the external mask based on the VMA range.
const char DFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";

// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct. For example,
// if the input IR contains a load with alignment 8, this flag will cause
// the shadow load to have alignment 16. This flag is disabled by default as
// we have unfortunately encountered too much code (including Clang itself;
// see PR14291) which performs misaligned access.
static cl::opt<bool> ClPreserveAlignment(
    "dfsan-preserve-alignment",
    cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
    cl::init(false));

// The ABI list files control how shadow parameters are passed. The pass treats
// every function labelled "uninstrumented" in the ABI list file as conforming
// to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
// additional annotations for those functions, a call to one of those functions
// will produce a warning message, as the labelling behaviour of the function
// is unknown. The other supported annotations are "functional" and "discard",
// which are described below under DataFlowSanitizer::WrapperKind.
static cl::list<std::string> ClABIListFiles(
    "dfsan-abilist",
    cl::desc("File listing native ABI functions and how the pass treats them"),
    cl::Hidden);

// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool>
    ClArgsABI("dfsan-args-abi",
              cl::desc("Use the argument ABI rather than the TLS ABI"),
              cl::Hidden);

// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
static cl::opt<bool> ClCombinePointerLabelsOnLoad(
    "dfsan-combine-pointer-labels-on-load",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "loading from memory."),
    cl::Hidden, cl::init(true));

// Controls whether the pass includes or ignores the labels of pointers in
// store instructions.
static cl::opt<bool> ClCombinePointerLabelsOnStore(
    "dfsan-combine-pointer-labels-on-store",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "storing in memory."),
    cl::Hidden, cl::init(false));

// Controls whether the pass propagates labels of offsets in GEP instructions.
static cl::opt<bool> ClCombineOffsetLabelsOnGEP(
    "dfsan-combine-offset-labels-on-gep",
    cl::desc(
        "Combine the label of the offset with the label of the pointer when "
        "doing pointer arithmetic."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClDebugNonzeroLabels(
    "dfsan-debug-nonzero-labels",
    cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
             "load or return with a nonzero label"),
    cl::Hidden);

// Experimental feature that inserts callbacks for certain data events.
// Currently callbacks are only inserted for loads, stores, memory transfers
// (i.e. memcpy and memmove), and comparisons.
//
// If this flag is set to true, the user must provide definitions for the
// following callback functions:
//   void __dfsan_load_callback(dfsan_label Label, void* addr);
//   void __dfsan_store_callback(dfsan_label Label, void* addr);
//   void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
//   void __dfsan_cmp_callback(dfsan_label CombinedLabel);
static cl::opt<bool> ClEventCallbacks(
    "dfsan-event-callbacks",
    cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
    cl::Hidden, cl::init(false));

// Use a distinct bit for each base label, enabling faster unions with less
// instrumentation. Limits the max number of base labels to 16.
static cl::opt<bool> ClFast16Labels(
    "dfsan-fast-16-labels",
    cl::desc("Use more efficient instrumentation, limiting the number of "
             "labels to 16."),
    cl::Hidden, cl::init(false));

// Use a distinct bit for each base label, enabling faster unions with less
// instrumentation. Limits the max number of base labels to 8.
static cl::opt<bool> ClFast8Labels(
    "dfsan-fast-8-labels",
    cl::desc("Use more efficient instrumentation, limiting the number of "
             "labels to 8."),
    cl::Hidden, cl::init(false));

// Controls whether the pass tracks the control flow of select instructions.
static cl::opt<bool> ClTrackSelectControlFlow(
    "dfsan-track-select-control-flow",
    cl::desc("Propagate labels from condition values of select instructions "
             "to results."),
    cl::Hidden, cl::init(true));

// TODO: This default value follows MSan. DFSan may use a different value.
static cl::opt<int> ClInstrumentWithCallThreshold(
    "dfsan-instrument-with-call-threshold",
    cl::desc("If the function being instrumented requires more than "
             "this number of origin stores, use callbacks instead of "
             "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// Controls how to track origins.
// * 0: do not track origins.
// * 1: track origins at memory store operations.
// * 2: track origins at memory load and store operations.
// TODO: track callsites.
static cl::opt<int> ClTrackOrigins("dfsan-track-origins",
                                   cl::desc("Track origins of labels"),
                                   cl::Hidden, cl::init(0));

static StringRef getGlobalTypeString(const GlobalValue &G) {
  // Types of GlobalVariables are always pointer types.
  Type *GType = G.getValueType();
  // For now we support excluding struct types only.
  if (StructType *SGType = dyn_cast<StructType>(GType)) {
    if (!SGType->isLiteral())
      return SGType->getName();
  }
  return "<unknown type>";
}

namespace {

class DFSanABIList {
  std::unique_ptr<SpecialCaseList> SCL;

public:
  DFSanABIList() = default;

  void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }

  /// Returns whether either this function or its source file are listed in the
  /// given category.
  bool isIn(const Function &F, StringRef Category) const {
    return isIn(*F.getParent(), Category) ||
           SCL->inSection("dataflow", "fun", F.getName(), Category);
  }

  /// Returns whether this global alias is listed in the given category.
  ///
  /// If GA aliases a function, the alias's name is matched as a function
  /// name would be. Similarly, aliases of globals are matched like globals.
  bool isIn(const GlobalAlias &GA, StringRef Category) const {
    if (isIn(*GA.getParent(), Category))
      return true;

    if (isa<FunctionType>(GA.getValueType()))
      return SCL->inSection("dataflow", "fun", GA.getName(), Category);

    return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
           SCL->inSection("dataflow", "type", getGlobalTypeString(GA),
                          Category);
  }

  /// Returns whether this module is listed in the given category.
  bool isIn(const Module &M, StringRef Category) const {
    return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
  }
};
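
// For reference, ABI list files use SpecialCaseList syntax. Illustrative
// entries (taken from the DataFlowSanitizer documentation, not from this
// file) look like:
//   fun:main=uninstrumented
//   fun:main=discard
//   fun:malloc=discard
//   fun:tolower=functional
//   fun:memcpy=uninstrumented
//   fun:memcpy=custom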

/// TransformedFunction is used to express the result of transforming one
/// function type into another. This struct is immutable. It holds metadata
/// useful for updating calls of the old function to the new type.
struct TransformedFunction {
  TransformedFunction(FunctionType *OriginalType, FunctionType *TransformedType,
                      std::vector<unsigned> ArgumentIndexMapping)
      : OriginalType(OriginalType), TransformedType(TransformedType),
        ArgumentIndexMapping(ArgumentIndexMapping) {}

  // Disallow copies.
  TransformedFunction(const TransformedFunction &) = delete;
  TransformedFunction &operator=(const TransformedFunction &) = delete;

  // Allow moves.
  TransformedFunction(TransformedFunction &&) = default;
  TransformedFunction &operator=(TransformedFunction &&) = default;

  /// Type of the function before the transformation.
  FunctionType *OriginalType;

  /// Type of the function after the transformation.
  FunctionType *TransformedType;

  /// Transforming a function may change the position of arguments. This
  /// member records the mapping from each argument's old position to its new
  /// position. Argument positions are zero-indexed. If the transformation
  /// from F to F' made the first argument of F into the third argument of F',
  /// then ArgumentIndexMapping[0] will equal 2.
  std::vector<unsigned> ArgumentIndexMapping;
};

/// Given function attributes from a call site for the original function,
/// return function attributes appropriate for a call to the transformed
/// function.
AttributeList
transformFunctionAttributes(const TransformedFunction &TransformedFunction,
                            LLVMContext &Ctx, AttributeList CallSiteAttrs) {

  // Construct a vector of AttributeSet for each function argument.
  std::vector<llvm::AttributeSet> ArgumentAttributes(
      TransformedFunction.TransformedType->getNumParams());

  // Copy attributes from the parameter of the original function to the
  // transformed version. 'ArgumentIndexMapping' holds the mapping from
  // old argument position to new.
  for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();
       I < IE; ++I) {
    unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];
    ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(I);
  }

  // Copy annotations on varargs arguments.
  for (unsigned I = TransformedFunction.OriginalType->getNumParams(),
                IE = CallSiteAttrs.getNumAttrSets();
       I < IE; ++I) {
    ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(I));
  }

  return AttributeList::get(Ctx, CallSiteAttrs.getFnAttributes(),
                            CallSiteAttrs.getRetAttributes(),
                            llvm::makeArrayRef(ArgumentAttributes));
}

class DataFlowSanitizer {
  friend struct DFSanFunction;
  friend class DFSanVisitor;

  enum { OriginWidthBits = 32, OriginWidthBytes = OriginWidthBits / 8 };

  /// Which ABI should be used for instrumented functions?
  enum InstrumentedABI {
    /// Argument and return value labels are passed through additional
    /// arguments and by modifying the return type.
    IA_Args,

    /// Argument and return value labels are passed through TLS variables
    /// __dfsan_arg_tls and __dfsan_retval_tls.
    IA_TLS
  };

  /// How should calls to uninstrumented functions be handled?
  enum WrapperKind {
    /// This function is present in an uninstrumented form but we don't know
    /// how it should be handled. Print a warning and call the function anyway.
    /// Don't label the return value.
    WK_Warning,

    /// This function does not write to (user-accessible) memory, and its
    /// return value is unlabelled.
    WK_Discard,

    /// This function does not write to (user-accessible) memory, and the
    /// label of its return value is the union of the label of its arguments.
    WK_Functional,

    /// Instead of calling the function, a custom wrapper __dfsw_F is called,
    /// where F is the name of the function. This function may wrap the
    /// original function or provide its own implementation. This is similar
    /// to the IA_Args ABI, except that IA_Args uses a struct return type to
    /// pass the return value shadow in a register, while WK_Custom uses an
    /// extra pointer argument to return the shadow. This allows the wrapped
    /// form of the function type to be expressed in C.
    WK_Custom
  };

  unsigned ShadowWidthBits;
  unsigned ShadowWidthBytes;

  Module *Mod;
  LLVMContext *Ctx;
  Type *Int8Ptr;
  IntegerType *OriginTy;
  PointerType *OriginPtrTy;
  ConstantInt *OriginBase;
  ConstantInt *ZeroOrigin;
  /// The shadow type for all primitive types and vector types.
  IntegerType *PrimitiveShadowTy;
  PointerType *PrimitiveShadowPtrTy;
  IntegerType *IntptrTy;
  ConstantInt *ZeroPrimitiveShadow;
  ConstantInt *ShadowPtrMask;
  ConstantInt *ShadowPtrMul;
  Constant *ArgTLS;
  ArrayType *ArgOriginTLSTy;
  Constant *ArgOriginTLS;
  Constant *RetvalTLS;
  Constant *RetvalOriginTLS;
  Constant *ExternalShadowMask;
  FunctionType *DFSanUnionFnTy;
  FunctionType *DFSanUnionLoadFnTy;
  FunctionType *DFSanLoadLabelAndOriginFnTy;
  FunctionType *DFSanUnimplementedFnTy;
  FunctionType *DFSanSetLabelFnTy;
  FunctionType *DFSanNonzeroLabelFnTy;
  FunctionType *DFSanVarargWrapperFnTy;
  FunctionType *DFSanCmpCallbackFnTy;
  FunctionType *DFSanLoadStoreCallbackFnTy;
  FunctionType *DFSanMemTransferCallbackFnTy;
  FunctionType *DFSanChainOriginFnTy;
  FunctionType *DFSanChainOriginIfTaintedFnTy;
  FunctionType *DFSanMemOriginTransferFnTy;
  FunctionType *DFSanMaybeStoreOriginFnTy;
  FunctionCallee DFSanUnionFn;
  FunctionCallee DFSanCheckedUnionFn;
  FunctionCallee DFSanUnionLoadFn;
  FunctionCallee DFSanUnionLoadFastLabelsFn;
  FunctionCallee DFSanLoadLabelAndOriginFn;
  FunctionCallee DFSanUnimplementedFn;
  FunctionCallee DFSanSetLabelFn;
  FunctionCallee DFSanNonzeroLabelFn;
  FunctionCallee DFSanVarargWrapperFn;
  FunctionCallee DFSanLoadCallbackFn;
  FunctionCallee DFSanStoreCallbackFn;
  FunctionCallee DFSanMemTransferCallbackFn;
  FunctionCallee DFSanCmpCallbackFn;
  FunctionCallee DFSanChainOriginFn;
  FunctionCallee DFSanChainOriginIfTaintedFn;
  FunctionCallee DFSanMemOriginTransferFn;
  FunctionCallee DFSanMaybeStoreOriginFn;
  SmallPtrSet<Value *, 16> DFSanRuntimeFunctions;
  MDNode *ColdCallWeights;
  MDNode *OriginStoreWeights;
  DFSanABIList ABIList;
  DenseMap<Value *, Function *> UnwrappedFnMap;
  AttrBuilder ReadOnlyNoneAttrs;
  bool DFSanRuntimeShadowMask = false;

  Value *getShadowOffset(Value *Addr, IRBuilder<> &IRB);
  Value *getShadowAddress(Value *Addr, Instruction *Pos);
  Value *getShadowAddress(Value *Addr, Instruction *Pos, Value *ShadowOffset);
  std::pair<Value *, Value *>
  getShadowOriginAddress(Value *Addr, Align InstAlignment, Instruction *Pos);
  bool isInstrumented(const Function *F);
  bool isInstrumented(const GlobalAlias *GA);
  FunctionType *getArgsFunctionType(FunctionType *T);
  FunctionType *getTrampolineFunctionType(FunctionType *T);
  TransformedFunction getCustomFunctionType(FunctionType *T);
  InstrumentedABI getInstrumentedABI();
  WrapperKind getWrapperKind(Function *F);
  void addGlobalNamePrefix(GlobalValue *GV);
  Function *buildWrapperFunction(Function *F, StringRef NewFName,
                                 GlobalValue::LinkageTypes NewFLink,
                                 FunctionType *NewFT);
  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
  void initializeCallbackFunctions(Module &M);
  void initializeRuntimeFunctions(Module &M);
  void injectMetadataGlobals(Module &M);

  bool init(Module &M);

  /// Advances \p OriginAddr to point to the next 32-bit origin and then loads
  /// from it. Returns the origin's loaded value.
  Value *loadNextOrigin(Instruction *Pos, Align OriginAlign,
                        Value **OriginAddr);

  /// Returns whether fast8 or fast16 mode has been specified.
  bool hasFastLabelsEnabled();

  /// Returns whether the given load byte size is amenable to inlined
  /// optimization patterns.
  bool hasLoadSizeForFastPath(uint64_t Size);

  /// Returns whether the pass tracks origins. Supported only in fast16 mode
  /// with the TLS ABI.
  bool shouldTrackOrigins();

  /// Returns whether the pass tracks labels for struct fields and array
  /// indices. Supported only in fast16 mode with the TLS ABI.
  bool shouldTrackFieldsAndIndices();

  /// Returns a zero constant with the shadow type of OrigTy.
  ///
  /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2),...}
  /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]
  /// getZeroShadow(other type) = i16(0)
  ///
  /// Note that a zero shadow is always i16(0) when shouldTrackFieldsAndIndices
  /// returns false.
  Constant *getZeroShadow(Type *OrigTy);
  /// Returns a zero constant with the shadow type of V's type.
  Constant *getZeroShadow(Value *V);

  /// Checks if V is a zero shadow.
  bool isZeroShadow(Value *V);

  /// Returns the shadow type of OrigTy.
  ///
  /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}
  /// getShadowTy([n x T]) = [n x getShadowTy(T)]
  /// getShadowTy(other type) = i16
  ///
  /// Note that a shadow type is always i16 when shouldTrackFieldsAndIndices
  /// returns false.
  Type *getShadowTy(Type *OrigTy);
  /// Returns the shadow type of V's type.
  Type *getShadowTy(Value *V);

  const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;

public:
  DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);

  bool runImpl(Module &M);
};

struct DFSanFunction {
  DataFlowSanitizer &DFS;
  Function *F;
  DominatorTree DT;
  DataFlowSanitizer::InstrumentedABI IA;
  bool IsNativeABI;
  AllocaInst *LabelReturnAlloca = nullptr;
  AllocaInst *OriginReturnAlloca = nullptr;
  DenseMap<Value *, Value *> ValShadowMap;
  DenseMap<Value *, Value *> ValOriginMap;
  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
  DenseMap<AllocaInst *, AllocaInst *> AllocaOriginMap;

  struct PHIFixupElement {
    PHINode *Phi;
    PHINode *ShadowPhi;
    PHINode *OriginPhi;
  };
  std::vector<PHIFixupElement> PHIFixups;

  DenseSet<Instruction *> SkipInsts;
  std::vector<Value *> NonZeroChecks;
  bool AvoidNewBlocks;

  struct CachedShadow {
    BasicBlock *Block; // The block where Shadow is defined.
    Value *Shadow;
  };
  /// Maps a value to its latest shadow value in terms of domination tree.
  DenseMap<std::pair<Value *, Value *>, CachedShadow> CachedShadows;
  /// Maps a value to the latest collapsed shadow value it was converted to in
  /// terms of domination tree. When ClDebugNonzeroLabels is on, this cache is
  /// used at a post process where CFG blocks are split. So it does not cache
  /// BasicBlock like CachedShadows, but uses domination between values.
  DenseMap<Value *, Value *> CachedCollapsedShadows;
  DenseMap<Value *, std::set<Value *>> ShadowElements;

  DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
    DT.recalculate(*F);
    // FIXME: Need to track down the register allocator issue which causes poor
    // performance in pathological cases with large numbers of basic blocks.
    AvoidNewBlocks = F->size() > 1000;
  }

  /// Computes the shadow address for a given function argument.
  ///
  /// Shadow = ArgTLS+ArgOffset.
  Value *getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB);

  /// Computes the shadow address for a return value.
  Value *getRetvalTLS(Type *T, IRBuilder<> &IRB);

  /// Computes the origin address for a given function argument.
  ///
  /// Origin = ArgOriginTLS[ArgNo].
  Value *getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB);

  /// Computes the origin address for a return value.
  Value *getRetvalOriginTLS();

  Value *getOrigin(Value *V);
  void setOrigin(Instruction *I, Value *Origin);
  /// Generates IR to compute the origin of the last operand with a taint
  /// label.
  Value *combineOperandOrigins(Instruction *Inst);
  /// Before the instruction Pos, generates IR to compute the last origin with
  /// a taint label. Labels and origins are from the vectors Shadows and
  /// Origins respectively. The generated IR is like
  ///   Sn-1 != Zero ? On-1: ... S2 != Zero ? O2: S1 != Zero ? O1: O0
  /// When Zero is nullptr, it uses ZeroPrimitiveShadow. Otherwise it can be
  /// zeros with other bitwidths.
  Value *combineOrigins(const std::vector<Value *> &Shadows,
                        const std::vector<Value *> &Origins, Instruction *Pos,
                        ConstantInt *Zero = nullptr);

  Value *getShadow(Value *V);
  void setShadow(Instruction *I, Value *Shadow);
  /// Generates IR to compute the union of the two given shadows, inserting it
  /// before Pos. The combined value has primitive type.
  Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  /// Combines the shadow values of V1 and V2, then converts the combined
  /// value with primitive type into a shadow value with the original type T.
  Value *combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
                                   Instruction *Pos);
  Value *combineOperandShadows(Instruction *Inst);

  /// Generates IR to load shadow and origin corresponding to bytes [\p Addr,
  /// \p Addr + \p Size), where \p Addr has alignment \p InstAlignment, and
  /// take the union of each of those shadows. The returned shadow always has
  /// primitive type.
  ///
  /// When tracking loads is enabled, the returned origin is a chain at the
  /// current stack if the returned shadow is tainted.
  std::pair<Value *, Value *> loadShadowOrigin(Value *Addr, uint64_t Size,
                                               Align InstAlignment,
                                               Instruction *Pos);

  void storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
                                  Align InstAlignment, Value *PrimitiveShadow,
                                  Value *Origin, Instruction *Pos);
  /// Applies PrimitiveShadow to all primitive subtypes of T, returning
  /// the expanded shadow value.
  ///
  /// EFP({T1,T2, ...}, PS) = {EFP(T1,PS),EFP(T2,PS),...}
  /// EFP([n x T], PS) = [n x EFP(T,PS)]
  /// EFP(other types, PS) = PS
  Value *expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
                                   Instruction *Pos);
  /// Collapses Shadow into a single primitive shadow value, unioning all
  /// primitive shadow values in the process. Returns the final primitive
  /// shadow value.
  ///
  /// CTP({V1,V2, ...}) = UNION(CTP(V1),CTP(V2),...)
  /// CTP([V1,V2,...]) = UNION(CTP(V1),CTP(V2),...)
  /// CTP(other type V) = V
  Value *collapseToPrimitiveShadow(Value *Shadow, Instruction *Pos);

  void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
                                Instruction *Pos);

  Align getShadowAlign(Align InstAlignment);

private:
  /// Collapses the shadow with aggregate type into a single primitive shadow
  /// value.
  template <class AggregateType>
  Value *collapseAggregateShadow(AggregateType *AT, Value *Shadow,
                                 IRBuilder<> &IRB);

  Value *collapseToPrimitiveShadow(Value *Shadow, IRBuilder<> &IRB);

  /// Returns the shadow value of an argument A.
  Value *getShadowForTLSArgument(Argument *A);

  /// The fast path of loading shadow in legacy mode.
  Value *loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size,
                              Align ShadowAlign, Instruction *Pos);

  /// The fast path of loading shadow in fast-16-label mode.
  std::pair<Value *, Value *>
  loadFast16ShadowFast(Value *ShadowAddr, Value *OriginAddr, uint64_t Size,
                       Align ShadowAlign, Align OriginAlign, Value *FirstOrigin,
                       Instruction *Pos);

  Align getOriginAlign(Align InstAlignment);

  /// Because 4 contiguous bytes share one 4-byte origin, the most accurate
  /// load is __dfsan_load_label_and_origin. This function returns the union
  /// of all labels and the origin of the first taint label. However, this is
  /// an additional call with many instructions. To ensure common cases are
  /// fast, this checks if it is possible to load labels and origins without
  /// using the callback function.
  ///
  /// When tracking load instructions is enabled, we always use
  /// __dfsan_load_label_and_origin to reduce code size.
  bool useCallbackLoadLabelAndOrigin(uint64_t Size, Align InstAlignment);

  /// Returns a chain at the current stack with previous origin V.
  Value *updateOrigin(Value *V, IRBuilder<> &IRB);

  /// Returns a chain at the current stack with previous origin V if Shadow is
  /// tainted.
  Value *updateOriginIfTainted(Value *Shadow, Value *Origin, IRBuilder<> &IRB);

  /// Creates an Intptr = Origin | Origin << 32 if Intptr's size is 64. Returns
  /// Origin otherwise.
  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin);

  /// Stores Origin into the address range [StoreOriginAddr, StoreOriginAddr +
  /// Size).
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *StoreOriginAddr,
                   uint64_t StoreOriginSize, Align Alignment);

  /// Stores Origin in terms of its Shadow value.
  /// * Do not write origins for zero shadows because we do not trace origins
  ///   for untainted sinks.
  /// * Use __dfsan_maybe_store_origin if there are too many origin store
  ///   instrumentations.
  void storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size, Value *Shadow,
                   Value *Origin, Value *StoreOriginAddr, Align InstAlignment);

  /// Convert a scalar value to an i1 by comparing with 0.
  Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &Name = "");

  bool shouldInstrumentWithCall();

  /// Generates IR to load shadow and origin corresponding to bytes [\p Addr,
  /// \p Addr + \p Size), where \p Addr has alignment \p InstAlignment, and
  /// take the union of each of those shadows. The returned shadow always has
  /// primitive type.
  std::pair<Value *, Value *>
  loadShadowOriginSansLoadTracking(Value *Addr, uint64_t Size,
                                   Align InstAlignment, Instruction *Pos);
  int NumOriginStores = 0;
};

class DFSanVisitor : public InstVisitor<DFSanVisitor> {
public:
  DFSanFunction &DFSF;

  DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}

  const DataLayout &getDataLayout() const {
    return DFSF.F->getParent()->getDataLayout();
  }

  // Combines shadow values and origins for all of I's operands.
  void visitInstOperands(Instruction &I);

  void visitUnaryOperator(UnaryOperator &UO);
  void visitBinaryOperator(BinaryOperator &BO);
  void visitCastInst(CastInst &CI);
  void visitCmpInst(CmpInst &CI);
  void visitGetElementPtrInst(GetElementPtrInst &GEPI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitReturnInst(ReturnInst &RI);
  void visitCallBase(CallBase &CB);
  void visitPHINode(PHINode &PN);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitShuffleVectorInst(ShuffleVectorInst &I);
  void visitExtractValueInst(ExtractValueInst &I);
  void visitInsertValueInst(InsertValueInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitSelectInst(SelectInst &I);
  void visitMemSetInst(MemSetInst &I);
  void visitMemTransferInst(MemTransferInst &I);

private:
  void visitCASOrRMW(Align InstAlignment, Instruction &I);

  // Returns false when this is an invoke of a custom function.
  bool visitWrappedCallBase(Function &F, CallBase &CB);

  // Combines origins for all of I's operands.
  void visitInstOperandOrigins(Instruction &I);

  void addShadowArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
                          IRBuilder<> &IRB);

  void addOriginArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
                          IRBuilder<> &IRB);
};

} // end anonymous namespace

DataFlowSanitizer::DataFlowSanitizer(
    const std::vector<std::string> &ABIListFiles) {
  if (ClFast8Labels && ClFast16Labels) {
    report_fatal_error(
        "cannot set both -dfsan-fast-8-labels and -dfsan-fast-16-labels");
  }

  ShadowWidthBits = ClFast8Labels ? 8 : 16;
  ShadowWidthBytes = ShadowWidthBits / 8;

  std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
  llvm::append_range(AllABIListFiles, ClABIListFiles);
  // FIXME: should we propagate vfs::FileSystem to this constructor?
  ABIList.set(
      SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem()));
}
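
// As a minimal illustration of the IA_Args transformation below (assuming
// the default 16-bit shadow, so PrimitiveShadowTy is i16), getArgsFunctionType
// turns
//   i32 (i32, float)
// into
//   { i32, i16 } (i32, float, i16, i16)
// i.e. one shadow argument per parameter plus a struct field carrying the
// return value's shadow; a vararg function additionally takes a pointer to
// the shadows of its variadic arguments.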
FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
  SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
  ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(PrimitiveShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    RetType = StructType::get(RetType, PrimitiveShadowTy);
  return FunctionType::get(RetType, ArgTypes, T->isVarArg());
}

FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
  assert(!T->isVarArg());
  SmallVector<Type *, 4> ArgTypes;
  ArgTypes.push_back(T->getPointerTo());
  ArgTypes.append(T->param_begin(), T->param_end());
  ArgTypes.append(T->getNumParams(), PrimitiveShadowTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(PrimitiveShadowPtrTy);

  if (shouldTrackOrigins()) {
    ArgTypes.append(T->getNumParams(), OriginTy);
    if (!RetType->isVoidTy())
      ArgTypes.push_back(OriginPtrTy);
  }

  return FunctionType::get(T->getReturnType(), ArgTypes, false);
}

TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
  SmallVector<Type *, 4> ArgTypes;

  // Some parameters of the custom function being constructed are
  // parameters of T. Record the mapping from parameters of T to
  // parameters of the custom function, so that parameter attributes
  // at call sites can be updated.
  std::vector<unsigned> ArgumentIndexMapping;
  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
    Type *ParamType = T->getParamType(I);
    FunctionType *FT;
    if (isa<PointerType>(ParamType) &&
        (FT = dyn_cast<FunctionType>(ParamType->getPointerElementType()))) {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
      ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
    } else {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(ParamType);
    }
  }
  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
    ArgTypes.push_back(PrimitiveShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(PrimitiveShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(PrimitiveShadowPtrTy);

  if (shouldTrackOrigins()) {
    for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
      ArgTypes.push_back(OriginTy);
    if (T->isVarArg())
      ArgTypes.push_back(OriginPtrTy);
    if (!RetType->isVoidTy())
      ArgTypes.push_back(OriginPtrTy);
  }

  return TransformedFunction(
      T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
      ArgumentIndexMapping);
}

bool DataFlowSanitizer::isZeroShadow(Value *V) {
  if (!shouldTrackFieldsAndIndices())
    return ZeroPrimitiveShadow == V;

  Type *T = V->getType();
  if (!isa<ArrayType>(T) && !isa<StructType>(T)) {
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
      return CI->isZero();
    return false;
  }

  return isa<ConstantAggregateZero>(V);
}

bool DataFlowSanitizer::hasFastLabelsEnabled() {
  static const bool HasFastLabelsEnabled = ClFast8Labels || ClFast16Labels;
  return HasFastLabelsEnabled;
}
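
// For example, with 2-byte shadows, a 4-byte load (ShadowSize == 8) or a
// 2-byte load (ShadowSize == 4) qualifies for the fast path below, while a
// 3-byte load (ShadowSize == 6) does not.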
bool DataFlowSanitizer::hasLoadSizeForFastPath(uint64_t Size) {
  uint64_t ShadowSize = Size * ShadowWidthBytes;
  return ShadowSize % 8 == 0 || ShadowSize == 4;
}

bool DataFlowSanitizer::shouldTrackOrigins() {
  static const bool ShouldTrackOrigins =
      ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
      hasFastLabelsEnabled();
  return ShouldTrackOrigins;
}

bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
  return getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
         hasFastLabelsEnabled();
}

Constant *DataFlowSanitizer::getZeroShadow(Type *OrigTy) {
  if (!shouldTrackFieldsAndIndices())
    return ZeroPrimitiveShadow;

  if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))
    return ZeroPrimitiveShadow;
  Type *ShadowTy = getShadowTy(OrigTy);
  return ConstantAggregateZero::get(ShadowTy);
}

Constant *DataFlowSanitizer::getZeroShadow(Value *V) {
  return getZeroShadow(V->getType());
}

static Value *expandFromPrimitiveShadowRecursive(
    Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubShadowTy,
    Value *PrimitiveShadow, IRBuilder<> &IRB) {
  if (!isa<ArrayType>(SubShadowTy) && !isa<StructType>(SubShadowTy))
    return IRB.CreateInsertValue(Shadow, PrimitiveShadow, Indices);

  if (ArrayType *AT = dyn_cast<ArrayType>(SubShadowTy)) {
    for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {
      Indices.push_back(Idx);
      Shadow = expandFromPrimitiveShadowRecursive(
          Shadow, Indices, AT->getElementType(), PrimitiveShadow, IRB);
      Indices.pop_back();
    }
    return Shadow;
  }

  if (StructType *ST = dyn_cast<StructType>(SubShadowTy)) {
    for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {
      Indices.push_back(Idx);
      Shadow = expandFromPrimitiveShadowRecursive(
          Shadow, Indices, ST->getElementType(Idx), PrimitiveShadow, IRB);
      Indices.pop_back();
    }
    return Shadow;
  }
  llvm_unreachable("Unexpected shadow type");
}

bool DFSanFunction::shouldInstrumentWithCall() {
  return ClInstrumentWithCallThreshold >= 0 &&
         NumOriginStores >= ClInstrumentWithCallThreshold;
}

Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
                                                Instruction *Pos) {
  Type *ShadowTy = DFS.getShadowTy(T);

  if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
    return PrimitiveShadow;

  if (DFS.isZeroShadow(PrimitiveShadow))
    return DFS.getZeroShadow(ShadowTy);

  IRBuilder<> IRB(Pos);
  SmallVector<unsigned, 4> Indices;
  Value *Shadow = UndefValue::get(ShadowTy);
  Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
                                              PrimitiveShadow, IRB);

  // Caches the primitive shadow value that built the shadow value.
  CachedCollapsedShadows[Shadow] = PrimitiveShadow;
  return Shadow;
}

template <class AggregateType>
Value *DFSanFunction::collapseAggregateShadow(AggregateType *AT, Value *Shadow,
                                              IRBuilder<> &IRB) {
  if (!AT->getNumElements())
    return DFS.ZeroPrimitiveShadow;

  Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
  Value *Aggregator = collapseToPrimitiveShadow(FirstItem, IRB);

  for (unsigned Idx = 1; Idx < AT->getNumElements(); Idx++) {
    Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
    Value *ShadowInner = collapseToPrimitiveShadow(ShadowItem, IRB);
    Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
  }
  return Aggregator;
}

Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
                                                IRBuilder<> &IRB) {
  Type *ShadowTy = Shadow->getType();
  if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
    return Shadow;
  if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy))
    return collapseAggregateShadow<>(AT, Shadow, IRB);
  if (StructType *ST = dyn_cast<StructType>(ShadowTy))
    return collapseAggregateShadow<>(ST, Shadow, IRB);
  llvm_unreachable("Unexpected shadow type");
}

Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
                                                Instruction *Pos) {
  Type *ShadowTy = Shadow->getType();
  if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
    return Shadow;

  assert(DFS.shouldTrackFieldsAndIndices());

  // Checks if the cached collapsed shadow value dominates Pos.
  Value *&CS = CachedCollapsedShadows[Shadow];
  if (CS && DT.dominates(CS, Pos))
    return CS;

  IRBuilder<> IRB(Pos);
  Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
  // Caches the converted primitive shadow value.
  CS = PrimitiveShadow;
  return PrimitiveShadow;
}
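
// For example, when fields and indices are tracked with 16-bit shadows,
// getShadowTy({i8*, [2 x i32]}) is {i16, [2 x i16]}, while getShadowTy(<4 x
// i32>) is a single i16 because vectors are treated as primitives here.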
Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
  if (!shouldTrackFieldsAndIndices())
    return PrimitiveShadowTy;

  if (!OrigTy->isSized())
    return PrimitiveShadowTy;
  if (isa<IntegerType>(OrigTy))
    return PrimitiveShadowTy;
  if (isa<VectorType>(OrigTy))
    return PrimitiveShadowTy;
  if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy))
    return ArrayType::get(getShadowTy(AT->getElementType()),
                          AT->getNumElements());
  if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
    SmallVector<Type *, 4> Elements;
    for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)
      Elements.push_back(getShadowTy(ST->getElementType(I)));
    return StructType::get(*Ctx, Elements);
  }
  return PrimitiveShadowTy;
}

Type *DataFlowSanitizer::getShadowTy(Value *V) {
  return getShadowTy(V->getType());
}

bool DataFlowSanitizer::init(Module &M) {
  Triple TargetTriple(M.getTargetTriple());
  const DataLayout &DL = M.getDataLayout();

  Mod = &M;
  Ctx = &M.getContext();
  Int8Ptr = Type::getInt8PtrTy(*Ctx);
  OriginTy = IntegerType::get(*Ctx, OriginWidthBits);
  OriginPtrTy = PointerType::getUnqual(OriginTy);
  PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
  PrimitiveShadowPtrTy = PointerType::getUnqual(PrimitiveShadowTy);
  IntptrTy = DL.getIntPtrType(*Ctx);
  ZeroPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, 0);
  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
  OriginBase = ConstantInt::get(IntptrTy, 0x200000000000LL);
  ZeroOrigin = ConstantInt::getSigned(OriginTy, 0);

  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    ShadowPtrMask = ClFast8Labels
                        ? ConstantInt::getSigned(IntptrTy, ~0x600000000000LL)
                        : ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
    break;
  case Triple::mips64:
  case Triple::mips64el:
    ShadowPtrMask = ClFast8Labels
                        ? ConstantInt::getSigned(IntptrTy, ~0xE000000000LL)
                        : ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
    DFSanRuntimeShadowMask = true;
    break;
  default:
    report_fatal_error("unsupported triple");
  }

  Type *DFSanUnionArgs[2] = {PrimitiveShadowTy, PrimitiveShadowTy};
  DFSanUnionFnTy =
      FunctionType::get(PrimitiveShadowTy, DFSanUnionArgs, /*isVarArg=*/false);
  Type *DFSanUnionLoadArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
  DFSanUnionLoadFnTy = FunctionType::get(PrimitiveShadowTy, DFSanUnionLoadArgs,
                                         /*isVarArg=*/false);
  Type *DFSanLoadLabelAndOriginArgs[2] = {Int8Ptr, IntptrTy};
  DFSanLoadLabelAndOriginFnTy =
      FunctionType::get(IntegerType::get(*Ctx, 64), DFSanLoadLabelAndOriginArgs,
                        /*isVarArg=*/false);
  DFSanUnimplementedFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  Type *DFSanSetLabelArgs[4] = {PrimitiveShadowTy, OriginTy,
                                Type::getInt8PtrTy(*Ctx), IntptrTy};
  DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
                                        DFSanSetLabelArgs, /*isVarArg=*/false);
  DFSanNonzeroLabelFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
  DFSanVarargWrapperFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  DFSanCmpCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
                        /*isVarArg=*/false);
  DFSanChainOriginFnTy =
      FunctionType::get(OriginTy, OriginTy, /*isVarArg=*/false);
  Type *DFSanChainOriginIfTaintedArgs[2] = {PrimitiveShadowTy, OriginTy};
  DFSanChainOriginIfTaintedFnTy = FunctionType::get(
      OriginTy, DFSanChainOriginIfTaintedArgs, /*isVarArg=*/false);
  Type *DFSanMaybeStoreOriginArgs[4] = {IntegerType::get(*Ctx, ShadowWidthBits),
                                        Int8Ptr, IntptrTy, OriginTy};
  DFSanMaybeStoreOriginFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMaybeStoreOriginArgs, /*isVarArg=*/false);
  Type *DFSanMemOriginTransferArgs[3] = {Int8Ptr, Int8Ptr, IntptrTy};
  DFSanMemOriginTransferFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMemOriginTransferArgs, /*isVarArg=*/false);
  Type *DFSanLoadStoreCallbackArgs[2] = {PrimitiveShadowTy, Int8Ptr};
  DFSanLoadStoreCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
                        /*isVarArg=*/false);
  Type *DFSanMemTransferCallbackArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
  DFSanMemTransferCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
                        /*isVarArg=*/false);

  ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
  return true;
}

bool DataFlowSanitizer::isInstrumented(const Function *F) {
  return !ABIList.isIn(*F, "uninstrumented");
}

bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
  return !ABIList.isIn(*GA, "uninstrumented");
}

DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
  return ClArgsABI ? IA_Args : IA_TLS;
}
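
// For reference, a WK_Custom wrapper for "int f(int n)" is declared in C as
// (following the DataFlowSanitizer design document):
//   int __dfsw_f(int n, dfsan_label n_label, dfsan_label *ret_label);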
DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
  if (ABIList.isIn(*F, "functional"))
    return WK_Functional;
  if (ABIList.isIn(*F, "discard"))
    return WK_Discard;
  if (ABIList.isIn(*F, "custom"))
    return WK_Custom;

  return WK_Warning;
}

void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
  std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
  GV->setName(Prefix + GVName);

  // Try to change the name of the function in module inline asm. We only do
  // this for specific asm directives, currently only ".symver", to try to
  // avoid corrupting asm which happens to contain the symbol name as a
  // substring. Note that the substitution for .symver assumes that the
  // versioned symbol also has an instrumented name.
  std::string Asm = GV->getParent()->getModuleInlineAsm();
  std::string SearchStr = ".symver " + GVName + ",";
  size_t Pos = Asm.find(SearchStr);
  if (Pos != std::string::npos) {
    Asm.replace(Pos, SearchStr.size(),
                ".symver " + Prefix + GVName + "," + Prefix);
    GV->getParent()->setModuleInlineAsm(Asm);
  }
}

Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
                                        GlobalValue::LinkageTypes NewFLink,
                                        FunctionType *NewFT) {
  FunctionType *FT = F->getFunctionType();
  Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
                                    NewFName, F->getParent());
  NewF->copyAttributesFrom(F);
  NewF->removeAttributes(
      AttributeList::ReturnIndex,
      AttributeFuncs::typeIncompatible(NewFT->getReturnType()));

  BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
  if (F->isVarArg()) {
    NewF->removeAttributes(AttributeList::FunctionIndex,
                           AttrBuilder().addAttribute("split-stack"));
    CallInst::Create(DFSanVarargWrapperFn,
                     IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
                     BB);
    new UnreachableInst(*Ctx, BB);
  } else {
    auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
    std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());

    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}

Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
                                                          StringRef FName) {
  FunctionType *FTT = getTrampolineFunctionType(FT);
  FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
  Function *F = dyn_cast<Function>(C.getCallee());
  if (F && F->isDeclaration()) {
    F->setLinkage(GlobalValue::LinkOnceODRLinkage);
    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    std::vector<Value *> Args;
    Function::arg_iterator AI = F->arg_begin() + 1;
    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
      Args.push_back(&*AI);
    CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
    Type *RetType = FT->getReturnType();
    ReturnInst *RI = RetType->isVoidTy() ? ReturnInst::Create(*Ctx, BB)
                                         : ReturnInst::Create(*Ctx, CI, BB);

    // F is called by a wrapped custom function with primitive shadows. So
    // its arguments and return value need conversion.
    DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;
    ++ValAI;
    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {
      Value *Shadow =
          DFSF.expandFromPrimitiveShadow(ValAI->getType(), &*ShadowAI, CI);
      DFSF.ValShadowMap[&*ValAI] = Shadow;
    }
    Function::arg_iterator RetShadowAI = ShadowAI;
    const bool ShouldTrackOrigins = shouldTrackOrigins();
    if (ShouldTrackOrigins) {
      ValAI = F->arg_begin();
      ++ValAI;
      Function::arg_iterator OriginAI = ShadowAI;
      if (!RetType->isVoidTy())
        ++OriginAI;
      for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++OriginAI, --N) {
        DFSF.ValOriginMap[&*ValAI] = &*OriginAI;
      }
    }
    DFSanVisitor(DFSF).visitCallInst(*CI);
    if (!RetType->isVoidTy()) {
      Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(
          DFSF.getShadow(RI->getReturnValue()), RI);
      new StoreInst(PrimitiveShadow, &*RetShadowAI, RI);
      if (ShouldTrackOrigins) {
        Value *Origin = DFSF.getOrigin(RI->getReturnValue());
        new StoreInst(Origin, &*std::prev(F->arg_end()), RI);
      }
    }
  }

  return cast<Constant>(C.getCallee());
}

// Initialize DataFlowSanitizer runtime functions and declare them in the
// module.
void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanUnionFn =
        Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanCheckedUnionFn =
        Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFn =
        Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFastLabelsFn = Mod->getOrInsertFunction(
        "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanLoadLabelAndOriginFn = Mod->getOrInsertFunction(
        "__dfsan_load_label_and_origin", DFSanLoadLabelAndOriginFnTy, AL);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanSetLabelFn =
        Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);
  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanChainOriginFn = Mod->getOrInsertFunction("__dfsan_chain_origin",
                                                  DFSanChainOriginFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanChainOriginIfTaintedFn = Mod->getOrInsertFunction(
        "__dfsan_chain_origin_if_tainted", DFSanChainOriginIfTaintedFnTy, AL);
  }
  DFSanMemOriginTransferFn = Mod->getOrInsertFunction(
      "__dfsan_mem_origin_transfer", DFSanMemOriginTransferFnTy);

  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
    DFSanMaybeStoreOriginFn = Mod->getOrInsertFunction(
        "__dfsan_maybe_store_origin", DFSanMaybeStoreOriginFnTy, AL);
  }

  DFSanRuntimeFunctions.insert(DFSanUnionFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanCheckedUnionFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnionLoadFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnionLoadFastLabelsFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanLoadLabelAndOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnimplementedFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanSetLabelFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanNonzeroLabelFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanVarargWrapperFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanLoadCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanStoreCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanCmpCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanChainOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanChainOriginIfTaintedFn.getCallee()->stripPointerCasts());
DFSanMemOriginTransferFn.getCallee()->stripPointerCasts()); 1439 DFSanRuntimeFunctions.insert( 1440 DFSanMaybeStoreOriginFn.getCallee()->stripPointerCasts()); 1441 } 1442 1443 // Initializes event callback functions and declare them in the module 1444 void DataFlowSanitizer::initializeCallbackFunctions(Module &M) { 1445 DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback", 1446 DFSanLoadStoreCallbackFnTy); 1447 DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback", 1448 DFSanLoadStoreCallbackFnTy); 1449 DFSanMemTransferCallbackFn = Mod->getOrInsertFunction( 1450 "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy); 1451 DFSanCmpCallbackFn = 1452 Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy); 1453 } 1454 1455 void DataFlowSanitizer::injectMetadataGlobals(Module &M) { 1456 // These variables can be used: 1457 // - by the runtime (to discover what the shadow width was, during 1458 // compilation) 1459 // - in testing (to avoid hardcoding the shadow width and type but instead 1460 // extract them by pattern matching) 1461 Type *IntTy = Type::getInt32Ty(*Ctx); 1462 (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bits", IntTy, [&] { 1463 return new GlobalVariable( 1464 M, IntTy, /*isConstant=*/true, GlobalValue::WeakODRLinkage, 1465 ConstantInt::get(IntTy, ShadowWidthBits), "__dfsan_shadow_width_bits"); 1466 }); 1467 (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bytes", IntTy, [&] { 1468 return new GlobalVariable(M, IntTy, /*isConstant=*/true, 1469 GlobalValue::WeakODRLinkage, 1470 ConstantInt::get(IntTy, ShadowWidthBytes), 1471 "__dfsan_shadow_width_bytes"); 1472 }); 1473 } 1474 1475 bool DataFlowSanitizer::runImpl(Module &M) { 1476 init(M); 1477 1478 if (ABIList.isIn(M, "skip")) 1479 return false; 1480 1481 const unsigned InitialGlobalSize = M.global_size(); 1482 const unsigned InitialModuleSize = M.size(); 1483 1484 bool Changed = false; 1485 1486 auto GetOrInsertGlobal = [this, &Changed](StringRef Name, 1487 Type *Ty) -> Constant * { 1488 Constant *C = Mod->getOrInsertGlobal(Name, Ty); 1489 if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) { 1490 Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel; 1491 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel); 1492 } 1493 return C; 1494 }; 1495 1496 // These globals must be kept in sync with the ones in dfsan.cpp. 
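  // As a rough sketch (the authoritative definitions live in the runtime),
  // the matching thread-local declarations in dfsan.cpp look approximately
  // like:
  //   THREADLOCAL u64 __dfsan_arg_tls[ArgTLSSize / 8];
  //   THREADLOCAL u64 __dfsan_retval_tls[RetvalTLSSize / 8];
  //   THREADLOCAL u32 __dfsan_arg_origin_tls[NumOfElementsInArgOrgTLS];
  //   THREADLOCAL u32 __dfsan_retval_origin_tls;
  // so any size or type change here must be mirrored there.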
1497 ArgTLS = 1498 GetOrInsertGlobal("__dfsan_arg_tls", 1499 ArrayType::get(Type::getInt64Ty(*Ctx), ArgTLSSize / 8)); 1500 RetvalTLS = GetOrInsertGlobal( 1501 "__dfsan_retval_tls", 1502 ArrayType::get(Type::getInt64Ty(*Ctx), RetvalTLSSize / 8)); 1503 ArgOriginTLSTy = ArrayType::get(OriginTy, NumOfElementsInArgOrgTLS); 1504 ArgOriginTLS = GetOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy); 1505 RetvalOriginTLS = GetOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy); 1506 1507 (void)Mod->getOrInsertGlobal("__dfsan_track_origins", OriginTy, [&] { 1508 Changed = true; 1509 return new GlobalVariable( 1510 M, OriginTy, true, GlobalValue::WeakODRLinkage, 1511 ConstantInt::getSigned(OriginTy, shouldTrackOrigins()), 1512 "__dfsan_track_origins"); 1513 }); 1514 1515 injectMetadataGlobals(M); 1516 1517 ExternalShadowMask = 1518 Mod->getOrInsertGlobal(DFSanExternShadowPtrMask, IntptrTy); 1519 1520 initializeCallbackFunctions(M); 1521 initializeRuntimeFunctions(M); 1522 1523 std::vector<Function *> FnsToInstrument; 1524 SmallPtrSet<Function *, 2> FnsWithNativeABI; 1525 for (Function &F : M) 1526 if (!F.isIntrinsic() && !DFSanRuntimeFunctions.contains(&F)) 1527 FnsToInstrument.push_back(&F); 1528 1529 // Give function aliases prefixes when necessary, and build wrappers where the 1530 // instrumentedness is inconsistent. 1531 for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end(); 1532 AI != AE;) { 1533 GlobalAlias *GA = &*AI; 1534 ++AI; 1535 // Don't stop on weak. We assume people aren't playing games with the 1536 // instrumentedness of overridden weak aliases. 1537 auto *F = dyn_cast<Function>(GA->getBaseObject()); 1538 if (!F) 1539 continue; 1540 1541 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F); 1542 if (GAInst && FInst) { 1543 addGlobalNamePrefix(GA); 1544 } else if (GAInst != FInst) { 1545 // Non-instrumented alias of an instrumented function, or vice versa. 1546 // Replace the alias with a native-ABI wrapper of the aliasee. The pass 1547 // below will take care of instrumenting it. 1548 Function *NewF = 1549 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType()); 1550 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType())); 1551 NewF->takeName(GA); 1552 GA->eraseFromParent(); 1553 FnsToInstrument.push_back(NewF); 1554 } 1555 } 1556 1557 ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly) 1558 .addAttribute(Attribute::ReadNone); 1559 1560 // First, change the ABI of every function in the module. ABI-listed 1561 // functions keep their original ABI and get a wrapper function. 1562 for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(), 1563 FE = FnsToInstrument.end(); 1564 FI != FE; ++FI) { 1565 Function &F = **FI; 1566 FunctionType *FT = F.getFunctionType(); 1567 1568 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() && 1569 FT->getReturnType()->isVoidTy()); 1570 1571 if (isInstrumented(&F)) { 1572 // Instrumented functions get a 'dfs$' prefix. This allows us to more 1573 // easily identify cases of mismatching ABIs. 
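    // For example, under the TLS ABI a definition of
    //   i32 @f(i32)
    // is simply renamed to "dfs$f", while under the args ABI it is also
    // retyped (see getArgsFunctionType) to take one shadow per parameter and
    // to return the original value together with its shadow, roughly:
    //   { i32, i16 } @"dfs$f"(i32, i16)
    // (a sketch assuming 16-bit labels).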
1574 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) { 1575 FunctionType *NewFT = getArgsFunctionType(FT); 1576 Function *NewF = Function::Create(NewFT, F.getLinkage(), 1577 F.getAddressSpace(), "", &M); 1578 NewF->copyAttributesFrom(&F); 1579 NewF->removeAttributes( 1580 AttributeList::ReturnIndex, 1581 AttributeFuncs::typeIncompatible(NewFT->getReturnType())); 1582 for (Function::arg_iterator FArg = F.arg_begin(), 1583 NewFArg = NewF->arg_begin(), 1584 FArgEnd = F.arg_end(); 1585 FArg != FArgEnd; ++FArg, ++NewFArg) { 1586 FArg->replaceAllUsesWith(&*NewFArg); 1587 } 1588 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList()); 1589 1590 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end(); 1591 UI != UE;) { 1592 BlockAddress *BA = dyn_cast<BlockAddress>(*UI); 1593 ++UI; 1594 if (BA) { 1595 BA->replaceAllUsesWith( 1596 BlockAddress::get(NewF, BA->getBasicBlock())); 1597 delete BA; 1598 } 1599 } 1600 F.replaceAllUsesWith( 1601 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT))); 1602 NewF->takeName(&F); 1603 F.eraseFromParent(); 1604 *FI = NewF; 1605 addGlobalNamePrefix(NewF); 1606 } else { 1607 addGlobalNamePrefix(&F); 1608 } 1609 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) { 1610 // Build a wrapper function for F. The wrapper simply calls F, and is 1611 // added to FnsToInstrument so that any instrumentation according to its 1612 // WrapperKind is done in the second pass below. 1613 FunctionType *NewFT = 1614 getInstrumentedABI() == IA_Args ? getArgsFunctionType(FT) : FT; 1615 1616 // If the function being wrapped has local linkage, then preserve the 1617 // function's linkage in the wrapper function. 1618 GlobalValue::LinkageTypes WrapperLinkage = 1619 F.hasLocalLinkage() ? F.getLinkage() 1620 : GlobalValue::LinkOnceODRLinkage; 1621 1622 Function *NewF = buildWrapperFunction( 1623 &F, 1624 (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) + 1625 std::string(F.getName()), 1626 WrapperLinkage, NewFT); 1627 if (getInstrumentedABI() == IA_TLS) 1628 NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs); 1629 1630 Value *WrappedFnCst = 1631 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)); 1632 F.replaceAllUsesWith(WrappedFnCst); 1633 1634 UnwrappedFnMap[WrappedFnCst] = &F; 1635 *FI = NewF; 1636 1637 if (!F.isDeclaration()) { 1638 // This function is probably defining an interposition of an 1639 // uninstrumented function and hence needs to keep the original ABI. 1640 // But any functions it may call need to use the instrumented ABI, so 1641 // we instrument it in a mode which preserves the original ABI. 1642 FnsWithNativeABI.insert(&F); 1643 1644 // This code needs to rebuild the iterators, as they may be invalidated 1645 // by the push_back, taking care that the new range does not include 1646 // any functions added by this code. 1647 size_t N = FI - FnsToInstrument.begin(), 1648 Count = FE - FnsToInstrument.begin(); 1649 FnsToInstrument.push_back(&F); 1650 FI = FnsToInstrument.begin() + N; 1651 FE = FnsToInstrument.begin() + Count; 1652 } 1653 // Hopefully, nobody will try to indirectly call a vararg 1654 // function... yet. 
1655 } else if (FT->isVarArg()) { 1656 UnwrappedFnMap[&F] = &F; 1657 *FI = nullptr; 1658 } 1659 } 1660 1661 for (Function *F : FnsToInstrument) { 1662 if (!F || F->isDeclaration()) 1663 continue; 1664 1665 removeUnreachableBlocks(*F); 1666 1667 DFSanFunction DFSF(*this, F, FnsWithNativeABI.count(F)); 1668 1669 // DFSanVisitor may create new basic blocks, which confuses df_iterator. 1670 // Build a copy of the list before iterating over it. 1671 SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock())); 1672 1673 for (BasicBlock *BB : BBList) { 1674 Instruction *Inst = &BB->front(); 1675 while (true) { 1676 // DFSanVisitor may split the current basic block, changing the current 1677 // instruction's next pointer and moving the next instruction to the 1678 // tail block from which we should continue. 1679 Instruction *Next = Inst->getNextNode(); 1680 // DFSanVisitor may delete Inst, so keep track of whether it was a 1681 // terminator. 1682 bool IsTerminator = Inst->isTerminator(); 1683 if (!DFSF.SkipInsts.count(Inst)) 1684 DFSanVisitor(DFSF).visit(Inst); 1685 if (IsTerminator) 1686 break; 1687 Inst = Next; 1688 } 1689 } 1690 1691 // We will not necessarily be able to compute the shadow for every phi node 1692 // until we have visited every block. Therefore, the code that handles phi 1693 // nodes adds them to the PHIFixups list so that they can be properly 1694 // handled here. 1695 for (DFSanFunction::PHIFixupElement &P : DFSF.PHIFixups) { 1696 for (unsigned Val = 0, N = P.Phi->getNumIncomingValues(); Val != N; 1697 ++Val) { 1698 P.ShadowPhi->setIncomingValue( 1699 Val, DFSF.getShadow(P.Phi->getIncomingValue(Val))); 1700 if (P.OriginPhi) 1701 P.OriginPhi->setIncomingValue( 1702 Val, DFSF.getOrigin(P.Phi->getIncomingValue(Val))); 1703 } 1704 } 1705 1706 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy 1707 // places (i.e. instructions in basic blocks we haven't even begun visiting 1708 // yet). To make our life easier, do this work in a pass after the main 1709 // instrumentation. 
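  // Schematically, for each value V in NonZeroChecks the loop below emits,
  // right after V's definition:
  //   %s  = <shadow of V collapsed to the primitive shadow type>
  //   %ne = icmp ne i16 %s, 0
  //   br i1 %ne, label %report, label %cont   ; cold branch weights
  // report:
  //   call void @__dfsan_nonzero_label()
  // (a sketch assuming the 16-bit primitive shadow type; block names are
  // illustrative only).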
1710 if (ClDebugNonzeroLabels) { 1711 for (Value *V : DFSF.NonZeroChecks) { 1712 Instruction *Pos; 1713 if (Instruction *I = dyn_cast<Instruction>(V)) 1714 Pos = I->getNextNode(); 1715 else 1716 Pos = &DFSF.F->getEntryBlock().front(); 1717 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos)) 1718 Pos = Pos->getNextNode(); 1719 IRBuilder<> IRB(Pos); 1720 Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos); 1721 Value *Ne = 1722 IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow); 1723 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen( 1724 Ne, Pos, /*Unreachable=*/false, ColdCallWeights)); 1725 IRBuilder<> ThenIRB(BI); 1726 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {}); 1727 } 1728 } 1729 } 1730 1731 return Changed || !FnsToInstrument.empty() || 1732 M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize; 1733 } 1734 1735 Value *DFSanFunction::getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB) { 1736 Value *Base = IRB.CreatePointerCast(DFS.ArgTLS, DFS.IntptrTy); 1737 if (ArgOffset) 1738 Base = IRB.CreateAdd(Base, ConstantInt::get(DFS.IntptrTy, ArgOffset)); 1739 return IRB.CreateIntToPtr(Base, PointerType::get(DFS.getShadowTy(T), 0), 1740 "_dfsarg"); 1741 } 1742 1743 Value *DFSanFunction::getRetvalTLS(Type *T, IRBuilder<> &IRB) { 1744 return IRB.CreatePointerCast( 1745 DFS.RetvalTLS, PointerType::get(DFS.getShadowTy(T), 0), "_dfsret"); 1746 } 1747 1748 Value *DFSanFunction::getRetvalOriginTLS() { return DFS.RetvalOriginTLS; } 1749 1750 Value *DFSanFunction::getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB) { 1751 return IRB.CreateConstGEP2_64(DFS.ArgOriginTLSTy, DFS.ArgOriginTLS, 0, ArgNo, 1752 "_dfsarg_o"); 1753 } 1754 1755 Value *DFSanFunction::getOrigin(Value *V) { 1756 assert(DFS.shouldTrackOrigins()); 1757 if (!isa<Argument>(V) && !isa<Instruction>(V)) 1758 return DFS.ZeroOrigin; 1759 Value *&Origin = ValOriginMap[V]; 1760 if (!Origin) { 1761 if (Argument *A = dyn_cast<Argument>(V)) { 1762 if (IsNativeABI) 1763 return DFS.ZeroOrigin; 1764 switch (IA) { 1765 case DataFlowSanitizer::IA_TLS: { 1766 if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) { 1767 Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin(); 1768 IRBuilder<> IRB(ArgOriginTLSPos); 1769 Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB); 1770 Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr); 1771 } else { 1772 // Overflow 1773 Origin = DFS.ZeroOrigin; 1774 } 1775 break; 1776 } 1777 case DataFlowSanitizer::IA_Args: { 1778 Origin = DFS.ZeroOrigin; 1779 break; 1780 } 1781 } 1782 } else { 1783 Origin = DFS.ZeroOrigin; 1784 } 1785 } 1786 return Origin; 1787 } 1788 1789 void DFSanFunction::setOrigin(Instruction *I, Value *Origin) { 1790 if (!DFS.shouldTrackOrigins()) 1791 return; 1792 assert(!ValOriginMap.count(I)); 1793 assert(Origin->getType() == DFS.OriginTy); 1794 ValOriginMap[I] = Origin; 1795 } 1796 1797 Value *DFSanFunction::getShadowForTLSArgument(Argument *A) { 1798 unsigned ArgOffset = 0; 1799 const DataLayout &DL = F->getParent()->getDataLayout(); 1800 for (auto &FArg : F->args()) { 1801 if (!FArg.getType()->isSized()) { 1802 if (A == &FArg) 1803 break; 1804 continue; 1805 } 1806 1807 unsigned Size = DL.getTypeAllocSize(DFS.getShadowTy(&FArg)); 1808 if (A != &FArg) { 1809 ArgOffset += alignTo(Size, ShadowTLSAlignment); 1810 if (ArgOffset > ArgTLSSize) 1811 break; // ArgTLS overflows, uses a zero shadow. 1812 continue; 1813 } 1814 1815 if (ArgOffset + Size > ArgTLSSize) 1816 break; // ArgTLS overflows, uses a zero shadow. 
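    // At this point FArg is the argument we are looking for and its shadow
    // fits in ArgTLS, so load it in the entry block. For example, with
    // 16-bit labels the second argument of f(i8, i32) is read at byte
    // offset 2: the i8's 2-byte shadow occupies [0, 2) after rounding up to
    // ShadowTLSAlignment.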
1817
1818   Instruction *ArgTLSPos = &*F->getEntryBlock().begin();
1819   IRBuilder<> IRB(ArgTLSPos);
1820   Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);
1821   return IRB.CreateAlignedLoad(DFS.getShadowTy(&FArg), ArgShadowPtr,
1822                                ShadowTLSAlignment);
1823 }
1824
1825   return DFS.getZeroShadow(A);
1826 }
1827
1828 Value *DFSanFunction::getShadow(Value *V) {
1829   if (!isa<Argument>(V) && !isa<Instruction>(V))
1830     return DFS.getZeroShadow(V);
1831   Value *&Shadow = ValShadowMap[V];
1832   if (!Shadow) {
1833     if (Argument *A = dyn_cast<Argument>(V)) {
1834       if (IsNativeABI)
1835         return DFS.getZeroShadow(V);
1836       switch (IA) {
1837       case DataFlowSanitizer::IA_TLS: {
1838         Shadow = getShadowForTLSArgument(A);
1839         break;
1840       }
1841       case DataFlowSanitizer::IA_Args: {
1842         unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
1843         Function::arg_iterator Arg = F->arg_begin();
1844         std::advance(Arg, ArgIdx);
1845         Shadow = &*Arg;
1846         assert(Shadow->getType() == DFS.PrimitiveShadowTy);
1847         break;
1848       }
1849       }
1850       NonZeroChecks.push_back(Shadow);
1851     } else {
1852       Shadow = DFS.getZeroShadow(V);
1853     }
1854   }
1855   return Shadow;
1856 }
1857
1858 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
1859   assert(!ValShadowMap.count(I));
1860   assert(DFS.shouldTrackFieldsAndIndices() ||
1861          Shadow->getType() == DFS.PrimitiveShadowTy);
1862   ValShadowMap[I] = Shadow;
1863 }
1864
1865 Value *DataFlowSanitizer::getShadowOffset(Value *Addr, IRBuilder<> &IRB) {
1866   // Returns Addr & shadow_mask
1867   assert(Addr != RetvalTLS && "Reinstrumenting?");
1868   Value *ShadowPtrMaskValue;
1869   if (DFSanRuntimeShadowMask)
1870     ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
1871   else
1872     ShadowPtrMaskValue = ShadowPtrMask;
1873   return IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
1874                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy));
1875 }
1876
1877 std::pair<Value *, Value *>
1878 DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
1879                                           Instruction *Pos) {
1880   // Returns ((Addr & shadow_mask) + origin_base) & ~3UL
1881   IRBuilder<> IRB(Pos);
1882   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1883   Value *ShadowPtr = getShadowAddress(Addr, Pos, ShadowOffset);
1884   Value *OriginPtr = nullptr;
1885   if (shouldTrackOrigins()) {
1886     Value *OriginLong = IRB.CreateAdd(ShadowOffset, OriginBase);
1887     const Align Alignment = llvm::assumeAligned(InstAlignment.value());
1888     // When alignment is >= 4, Addr must be aligned to 4, otherwise it is UB.
1889     // So Mask is unnecessary.
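    // For example, an origin address of origin_base + 0x1006 for a
    // 2-byte-aligned access is rounded down to origin_base + 0x1004 so that
    // the 4-byte origin slot covering that granule is addressed (a sketch
    // assuming MinOriginAlignment == 4, i.e. Mask == 3).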
1890     if (Alignment < MinOriginAlignment) {
1891       uint64_t Mask = MinOriginAlignment.value() - 1;
1892       OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(IntptrTy, ~Mask));
1893     }
1894     OriginPtr = IRB.CreateIntToPtr(OriginLong, OriginPtrTy);
1895   }
1896   return {ShadowPtr, OriginPtr};
1897 }
1898
1899 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos,
1900                                            Value *ShadowOffset) {
1901   IRBuilder<> IRB(Pos);
1902
1903   if (!ShadowPtrMul->isOne())
1904     ShadowOffset = IRB.CreateMul(ShadowOffset, ShadowPtrMul);
1905
1906   return IRB.CreateIntToPtr(ShadowOffset, PrimitiveShadowPtrTy);
1907 }
1908
1909 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
1910   // Returns (Addr & shadow_mask) x ShadowWidthBytes
1911   IRBuilder<> IRB(Pos);
1912   Value *ShadowOffset = getShadowOffset(Addr, IRB);
1913   return getShadowAddress(Addr, Pos, ShadowOffset);
1914 }
1915
1916 Value *DFSanFunction::combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
1917                                                 Instruction *Pos) {
1918   Value *PrimitiveValue = combineShadows(V1, V2, Pos);
1919   return expandFromPrimitiveShadow(T, PrimitiveValue, Pos);
1920 }
1921
1922 // Generates IR to compute the union of the two given shadows, inserting it
1923 // before Pos. The combined value has a primitive type.
1924 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
1925   if (DFS.isZeroShadow(V1))
1926     return collapseToPrimitiveShadow(V2, Pos);
1927   if (DFS.isZeroShadow(V2))
1928     return collapseToPrimitiveShadow(V1, Pos);
1929   if (V1 == V2)
1930     return collapseToPrimitiveShadow(V1, Pos);
1931
1932   auto V1Elems = ShadowElements.find(V1);
1933   auto V2Elems = ShadowElements.find(V2);
1934   if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
1935     if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
1936                       V2Elems->second.begin(), V2Elems->second.end())) {
1937       return collapseToPrimitiveShadow(V1, Pos);
1938     }
1939     if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
1940                       V1Elems->second.begin(), V1Elems->second.end())) {
1941       return collapseToPrimitiveShadow(V2, Pos);
1942     }
1943   } else if (V1Elems != ShadowElements.end()) {
1944     if (V1Elems->second.count(V2))
1945       return collapseToPrimitiveShadow(V1, Pos);
1946   } else if (V2Elems != ShadowElements.end()) {
1947     if (V2Elems->second.count(V1))
1948       return collapseToPrimitiveShadow(V2, Pos);
1949   }
1950
1951   auto Key = std::make_pair(V1, V2);
1952   if (V1 > V2)
1953     std::swap(Key.first, Key.second);
1954   CachedShadow &CCS = CachedShadows[Key];
1955   if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
1956     return CCS.Shadow;
1957
1958   // Converts input shadows to shadows with primitive types.
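  // In fast-labels mode the union below is a single "or" instruction; in
  // legacy mode it becomes either a call to dfsan_union (when new blocks must
  // be avoided) or a compare-and-branch to __dfsan_union, roughly:
  //   %ne = icmp ne i16 %pv1, %pv2
  //   br i1 %ne, label %union, label %tail
  // union:
  //   %u = call zeroext i16 @__dfsan_union(i16 zeroext %pv1, i16 zeroext %pv2)
  // tail:
  //   %s = phi i16 [ %u, %union ], [ %pv1, %head ]
  // (a sketch assuming 16-bit labels; block and value names are illustrative).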
1959 Value *PV1 = collapseToPrimitiveShadow(V1, Pos); 1960 Value *PV2 = collapseToPrimitiveShadow(V2, Pos); 1961 1962 IRBuilder<> IRB(Pos); 1963 if (DFS.hasFastLabelsEnabled()) { 1964 CCS.Block = Pos->getParent(); 1965 CCS.Shadow = IRB.CreateOr(PV1, PV2); 1966 } else if (AvoidNewBlocks) { 1967 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {PV1, PV2}); 1968 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); 1969 Call->addParamAttr(0, Attribute::ZExt); 1970 Call->addParamAttr(1, Attribute::ZExt); 1971 1972 CCS.Block = Pos->getParent(); 1973 CCS.Shadow = Call; 1974 } else { 1975 BasicBlock *Head = Pos->getParent(); 1976 Value *Ne = IRB.CreateICmpNE(PV1, PV2); 1977 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen( 1978 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT)); 1979 IRBuilder<> ThenIRB(BI); 1980 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {PV1, PV2}); 1981 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); 1982 Call->addParamAttr(0, Attribute::ZExt); 1983 Call->addParamAttr(1, Attribute::ZExt); 1984 1985 BasicBlock *Tail = BI->getSuccessor(0); 1986 PHINode *Phi = 1987 PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front()); 1988 Phi->addIncoming(Call, Call->getParent()); 1989 Phi->addIncoming(PV1, Head); 1990 1991 CCS.Block = Tail; 1992 CCS.Shadow = Phi; 1993 } 1994 1995 std::set<Value *> UnionElems; 1996 if (V1Elems != ShadowElements.end()) { 1997 UnionElems = V1Elems->second; 1998 } else { 1999 UnionElems.insert(V1); 2000 } 2001 if (V2Elems != ShadowElements.end()) { 2002 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end()); 2003 } else { 2004 UnionElems.insert(V2); 2005 } 2006 ShadowElements[CCS.Shadow] = std::move(UnionElems); 2007 2008 return CCS.Shadow; 2009 } 2010 2011 // A convenience function which folds the shadows of each of the operands 2012 // of the provided instruction Inst, inserting the IR before Inst. Returns 2013 // the computed union Value. 2014 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) { 2015 if (Inst->getNumOperands() == 0) 2016 return DFS.getZeroShadow(Inst); 2017 2018 Value *Shadow = getShadow(Inst->getOperand(0)); 2019 for (unsigned I = 1, N = Inst->getNumOperands(); I < N; ++I) 2020 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(I)), Inst); 2021 2022 return expandFromPrimitiveShadow(Inst->getType(), Shadow, Inst); 2023 } 2024 2025 void DFSanVisitor::visitInstOperands(Instruction &I) { 2026 Value *CombinedShadow = DFSF.combineOperandShadows(&I); 2027 DFSF.setShadow(&I, CombinedShadow); 2028 visitInstOperandOrigins(I); 2029 } 2030 2031 Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows, 2032 const std::vector<Value *> &Origins, 2033 Instruction *Pos, ConstantInt *Zero) { 2034 assert(Shadows.size() == Origins.size()); 2035 size_t Size = Origins.size(); 2036 if (Size == 0) 2037 return DFS.ZeroOrigin; 2038 Value *Origin = nullptr; 2039 if (!Zero) 2040 Zero = DFS.ZeroPrimitiveShadow; 2041 for (size_t I = 0; I != Size; ++I) { 2042 Value *OpOrigin = Origins[I]; 2043 Constant *ConstOpOrigin = dyn_cast<Constant>(OpOrigin); 2044 if (ConstOpOrigin && ConstOpOrigin->isNullValue()) 2045 continue; 2046 if (!Origin) { 2047 Origin = OpOrigin; 2048 continue; 2049 } 2050 Value *OpShadow = Shadows[I]; 2051 Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos); 2052 IRBuilder<> IRB(Pos); 2053 Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero); 2054 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin); 2055 } 2056 return Origin ? 
Origin : DFS.ZeroOrigin; 2057 } 2058 2059 Value *DFSanFunction::combineOperandOrigins(Instruction *Inst) { 2060 size_t Size = Inst->getNumOperands(); 2061 std::vector<Value *> Shadows(Size); 2062 std::vector<Value *> Origins(Size); 2063 for (unsigned I = 0; I != Size; ++I) { 2064 Shadows[I] = getShadow(Inst->getOperand(I)); 2065 Origins[I] = getOrigin(Inst->getOperand(I)); 2066 } 2067 return combineOrigins(Shadows, Origins, Inst); 2068 } 2069 2070 void DFSanVisitor::visitInstOperandOrigins(Instruction &I) { 2071 if (!DFSF.DFS.shouldTrackOrigins()) 2072 return; 2073 Value *CombinedOrigin = DFSF.combineOperandOrigins(&I); 2074 DFSF.setOrigin(&I, CombinedOrigin); 2075 } 2076 2077 Align DFSanFunction::getShadowAlign(Align InstAlignment) { 2078 const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1); 2079 return Align(Alignment.value() * DFS.ShadowWidthBytes); 2080 } 2081 2082 Align DFSanFunction::getOriginAlign(Align InstAlignment) { 2083 const Align Alignment = llvm::assumeAligned(InstAlignment.value()); 2084 return Align(std::max(MinOriginAlignment, Alignment)); 2085 } 2086 2087 bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size, 2088 Align InstAlignment) { 2089 // When enabling tracking load instructions, we always use 2090 // __dfsan_load_label_and_origin to reduce code size. 2091 if (ClTrackOrigins == 2) 2092 return true; 2093 2094 assert(Size != 0); 2095 // * if Size == 1, it is sufficient to load its origin aligned at 4. 2096 // * if Size == 2, we assume most cases Addr % 2 == 0, so it is sufficient to 2097 // load its origin aligned at 4. If not, although origins may be lost, it 2098 // should not happen very often. 2099 // * if align >= 4, Addr must be aligned to 4, otherwise it is UB. When 2100 // Size % 4 == 0, it is more efficient to load origins without callbacks. 2101 // * Otherwise we use __dfsan_load_label_and_origin. 2102 // This should ensure that common cases run efficiently. 2103 if (Size <= 2) 2104 return false; 2105 2106 const Align Alignment = llvm::assumeAligned(InstAlignment.value()); 2107 return Alignment < MinOriginAlignment || !DFS.hasLoadSizeForFastPath(Size); 2108 } 2109 2110 Value *DataFlowSanitizer::loadNextOrigin(Instruction *Pos, Align OriginAlign, 2111 Value **OriginAddr) { 2112 IRBuilder<> IRB(Pos); 2113 *OriginAddr = 2114 IRB.CreateGEP(OriginTy, *OriginAddr, ConstantInt::get(IntptrTy, 1)); 2115 return IRB.CreateAlignedLoad(OriginTy, *OriginAddr, OriginAlign); 2116 } 2117 2118 std::pair<Value *, Value *> DFSanFunction::loadFast16ShadowFast( 2119 Value *ShadowAddr, Value *OriginAddr, uint64_t Size, Align ShadowAlign, 2120 Align OriginAlign, Value *FirstOrigin, Instruction *Pos) { 2121 const bool ShouldTrackOrigins = DFS.shouldTrackOrigins(); 2122 const uint64_t ShadowSize = Size * DFS.ShadowWidthBytes; 2123 2124 assert(Size >= 4 && "Not large enough load size for fast path!"); 2125 2126 // Used for origin tracking. 2127 std::vector<Value *> Shadows; 2128 std::vector<Value *> Origins; 2129 2130 // Load instructions in LLVM can have arbitrary byte sizes (e.g., 3, 12, 20) 2131 // but this function is only used in a subset of cases that make it possible 2132 // to optimize the instrumentation. 2133 // 2134 // Specifically, when the shadow size in bytes (i.e., loaded bytes x shadow 2135 // per byte) is either: 2136 // - a multiple of 8 (common) 2137 // - equal to 4 (only for load32 in fast-8 mode) 2138 // 2139 // For the second case, we can fit the wide shadow in a 32-bit integer. 
In all 2140 // other cases, we use a 64-bit integer to hold the wide shadow. 2141 Type *WideShadowTy = 2142 ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx); 2143 2144 IRBuilder<> IRB(Pos); 2145 Value *WideAddr = IRB.CreateBitCast(ShadowAddr, WideShadowTy->getPointerTo()); 2146 Value *CombinedWideShadow = 2147 IRB.CreateAlignedLoad(WideShadowTy, WideAddr, ShadowAlign); 2148 2149 unsigned WideShadowBitWidth = WideShadowTy->getIntegerBitWidth(); 2150 const uint64_t BytesPerWideShadow = WideShadowBitWidth / DFS.ShadowWidthBits; 2151 2152 auto AppendWideShadowAndOrigin = [&](Value *WideShadow, Value *Origin) { 2153 if (BytesPerWideShadow > 4) { 2154 assert(BytesPerWideShadow == 8); 2155 // The wide shadow relates to two origin pointers: one for the first four 2156 // application bytes, and one for the latest four. We use a left shift to 2157 // get just the shadow bytes that correspond to the first origin pointer, 2158 // and then the entire shadow for the second origin pointer (which will be 2159 // chosen by combineOrigins() iff the least-significant half of the wide 2160 // shadow was empty but the other half was not). 2161 Value *WideShadowLo = IRB.CreateShl( 2162 WideShadow, ConstantInt::get(WideShadowTy, WideShadowBitWidth / 2)); 2163 Shadows.push_back(WideShadow); 2164 Origins.push_back(DFS.loadNextOrigin(Pos, OriginAlign, &OriginAddr)); 2165 2166 Shadows.push_back(WideShadowLo); 2167 Origins.push_back(Origin); 2168 } else { 2169 Shadows.push_back(WideShadow); 2170 Origins.push_back(Origin); 2171 } 2172 }; 2173 2174 if (ShouldTrackOrigins) 2175 AppendWideShadowAndOrigin(CombinedWideShadow, FirstOrigin); 2176 2177 // First OR all the WideShadows (i.e., 64bit or 32bit shadow chunks) linearly; 2178 // then OR individual shadows within the combined WideShadow by binary ORing. 2179 // This is fewer instructions than ORing shadows individually, since it 2180 // needs logN shift/or instructions (N being the bytes of the combined wide 2181 // shadow). 2182 for (uint64_t ByteOfs = BytesPerWideShadow; ByteOfs < Size; 2183 ByteOfs += BytesPerWideShadow) { 2184 WideAddr = IRB.CreateGEP(WideShadowTy, WideAddr, 2185 ConstantInt::get(DFS.IntptrTy, 1)); 2186 Value *NextWideShadow = 2187 IRB.CreateAlignedLoad(WideShadowTy, WideAddr, ShadowAlign); 2188 CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow); 2189 if (ShouldTrackOrigins) { 2190 Value *NextOrigin = DFS.loadNextOrigin(Pos, OriginAlign, &OriginAddr); 2191 AppendWideShadowAndOrigin(NextWideShadow, NextOrigin); 2192 } 2193 } 2194 for (unsigned Width = WideShadowBitWidth / 2; Width >= DFS.ShadowWidthBits; 2195 Width >>= 1) { 2196 Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width); 2197 CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow); 2198 } 2199 return {IRB.CreateTrunc(CombinedWideShadow, DFS.PrimitiveShadowTy), 2200 ShouldTrackOrigins 2201 ? combineOrigins(Shadows, Origins, Pos, 2202 ConstantInt::getSigned(IRB.getInt64Ty(), 0)) 2203 : DFS.ZeroOrigin}; 2204 } 2205 2206 Value *DFSanFunction::loadLegacyShadowFast(Value *ShadowAddr, uint64_t Size, 2207 Align ShadowAlign, 2208 Instruction *Pos) { 2209 // Fast path for the common case where each byte has identical shadow: load 2210 // shadow 64 (or 32) bits at a time, fall out to a __dfsan_union_load call if 2211 // any shadow is non-equal. 
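  // For example, with 16-bit labels an 8-byte load covers four shadow slots;
  // they are loaded as a single i64 and, if all four 16-bit lanes are equal
  // (checked with the rotate trick below), the truncated lane is the result.
  // Otherwise control falls out to the runtime __dfsan_union_load slow path.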
2212   BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
2213   IRBuilder<> FallbackIRB(FallbackBB);
2214   CallInst *FallbackCall = FallbackIRB.CreateCall(
2215       DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
2216   FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
2217
2218   const uint64_t ShadowSize = Size * DFS.ShadowWidthBytes;
2219   assert(Size >= 4 && "Not large enough load size for fast path!");
2220
2221   // Same as in loadFast16ShadowFast. In the case of load32, we can fit the
2222   // wide shadow in a 32-bit integer instead.
2223   Type *WideShadowTy =
2224       ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx);
2225
2226   // Compare each of the shadows stored in the loaded 64 (or 32) bits to each
2227   // other, by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
2228   IRBuilder<> IRB(Pos);
2229   unsigned WideShadowBitWidth = WideShadowTy->getIntegerBitWidth();
2230   Value *WideAddr = IRB.CreateBitCast(ShadowAddr, WideShadowTy->getPointerTo());
2231   Value *WideShadow =
2232       IRB.CreateAlignedLoad(WideShadowTy, WideAddr, ShadowAlign);
2233   Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.PrimitiveShadowTy);
2234   Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
2235   Value *ShrShadow =
2236       IRB.CreateLShr(WideShadow, WideShadowBitWidth - DFS.ShadowWidthBits);
2237   Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
2238   Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
2239
2240   BasicBlock *Head = Pos->getParent();
2241   BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
2242
2243   if (DomTreeNode *OldNode = DT.getNode(Head)) {
2244     std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
2245
2246     DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
2247     for (auto *Child : Children)
2248       DT.changeImmediateDominator(Child, NewNode);
2249   }
2250
2251   // In the following code LastBr will refer to the previous basic block's
2252   // conditional branch instruction, whose true successor is fixed up to point
2253   // to the next block during the loop below or to the tail after the final
2254   // iteration.
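  // The resulting CFG for, say, a 16-byte load looks roughly like:
  //   head:     cmp0 -> br next1 / fallback
  //   next1:    cmp1 -> br tail  / fallback
  //   fallback: call __dfsan_union_load -> br tail
  //   tail:     phi [ trunc, next1 ], [ call, fallback ]
  // (a sketch; block names are illustrative only).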
2255 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq); 2256 ReplaceInstWithInst(Head->getTerminator(), LastBr); 2257 DT.addNewBlock(FallbackBB, Head); 2258 2259 const uint64_t BytesPerWideShadow = WideShadowBitWidth / DFS.ShadowWidthBits; 2260 2261 for (uint64_t ByteOfs = BytesPerWideShadow; ByteOfs < Size; 2262 ByteOfs += BytesPerWideShadow) { 2263 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F); 2264 DT.addNewBlock(NextBB, LastBr->getParent()); 2265 IRBuilder<> NextIRB(NextBB); 2266 WideAddr = NextIRB.CreateGEP(WideShadowTy, WideAddr, 2267 ConstantInt::get(DFS.IntptrTy, 1)); 2268 Value *NextWideShadow = 2269 NextIRB.CreateAlignedLoad(WideShadowTy, WideAddr, ShadowAlign); 2270 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow); 2271 LastBr->setSuccessor(0, NextBB); 2272 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB); 2273 } 2274 2275 LastBr->setSuccessor(0, Tail); 2276 FallbackIRB.CreateBr(Tail); 2277 PHINode *Shadow = 2278 PHINode::Create(DFS.PrimitiveShadowTy, 2, "", &Tail->front()); 2279 Shadow->addIncoming(FallbackCall, FallbackBB); 2280 Shadow->addIncoming(TruncShadow, LastBr->getParent()); 2281 return Shadow; 2282 } 2283 2284 std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking( 2285 Value *Addr, uint64_t Size, Align InstAlignment, Instruction *Pos) { 2286 const bool ShouldTrackOrigins = DFS.shouldTrackOrigins(); 2287 2288 // Non-escaped loads. 2289 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { 2290 const auto SI = AllocaShadowMap.find(AI); 2291 if (SI != AllocaShadowMap.end()) { 2292 IRBuilder<> IRB(Pos); 2293 Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second); 2294 const auto OI = AllocaOriginMap.find(AI); 2295 assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end()); 2296 return {ShadowLI, ShouldTrackOrigins 2297 ? IRB.CreateLoad(DFS.OriginTy, OI->second) 2298 : nullptr}; 2299 } 2300 } 2301 2302 // Load from constant addresses. 2303 SmallVector<const Value *, 2> Objs; 2304 getUnderlyingObjects(Addr, Objs); 2305 bool AllConstants = true; 2306 for (const Value *Obj : Objs) { 2307 if (isa<Function>(Obj) || isa<BlockAddress>(Obj)) 2308 continue; 2309 if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant()) 2310 continue; 2311 2312 AllConstants = false; 2313 break; 2314 } 2315 if (AllConstants) 2316 return {DFS.ZeroPrimitiveShadow, 2317 ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr}; 2318 2319 if (Size == 0) 2320 return {DFS.ZeroPrimitiveShadow, 2321 ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr}; 2322 2323 // Use callback to load if this is not an optimizable case for origin 2324 // tracking. 2325 if (ShouldTrackOrigins && 2326 useCallbackLoadLabelAndOrigin(Size, InstAlignment)) { 2327 IRBuilder<> IRB(Pos); 2328 CallInst *Call = 2329 IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn, 2330 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), 2331 ConstantInt::get(DFS.IntptrTy, Size)}); 2332 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); 2333 return {IRB.CreateTrunc(IRB.CreateLShr(Call, DFS.OriginWidthBits), 2334 DFS.PrimitiveShadowTy), 2335 IRB.CreateTrunc(Call, DFS.OriginTy)}; 2336 } 2337 2338 // Other cases that support loading shadows or origins in a fast way. 
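  // Sizes 1 and 2 are handled inline below. For example, a 2-byte load
  // becomes two adjacent shadow loads combined with combineShadows, roughly:
  //   %s0 = load i16, i16* %shadow_addr, align 2
  //   %p1 = getelementptr i16, i16* %shadow_addr, i64 1
  //   %s1 = load i16, i16* %p1, align 2
  //   ... union of %s0 and %s1 ...
  // (a sketch assuming 16-bit labels). Larger sizes take one of the wide-load
  // fast paths or fall back to a runtime union-load call.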
2339 Value *ShadowAddr, *OriginAddr; 2340 std::tie(ShadowAddr, OriginAddr) = 2341 DFS.getShadowOriginAddress(Addr, InstAlignment, Pos); 2342 2343 const Align ShadowAlign = getShadowAlign(InstAlignment); 2344 const Align OriginAlign = getOriginAlign(InstAlignment); 2345 Value *Origin = nullptr; 2346 if (ShouldTrackOrigins) { 2347 IRBuilder<> IRB(Pos); 2348 Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign); 2349 } 2350 2351 // When the byte size is small enough, we can load the shadow directly with 2352 // just a few instructions. 2353 switch (Size) { 2354 case 1: { 2355 LoadInst *LI = new LoadInst(DFS.PrimitiveShadowTy, ShadowAddr, "", Pos); 2356 LI->setAlignment(ShadowAlign); 2357 return {LI, Origin}; 2358 } 2359 case 2: { 2360 IRBuilder<> IRB(Pos); 2361 Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr, 2362 ConstantInt::get(DFS.IntptrTy, 1)); 2363 Value *Load = 2364 IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr, ShadowAlign); 2365 Value *Load1 = 2366 IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr1, ShadowAlign); 2367 return {combineShadows(Load, Load1, Pos), Origin}; 2368 } 2369 } 2370 bool HasSizeForFastPath = DFS.hasLoadSizeForFastPath(Size); 2371 bool HasFastLabelsEnabled = DFS.hasFastLabelsEnabled(); 2372 2373 if (HasFastLabelsEnabled && HasSizeForFastPath) 2374 return loadFast16ShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign, 2375 OriginAlign, Origin, Pos); 2376 2377 if (!AvoidNewBlocks && HasSizeForFastPath) 2378 return {loadLegacyShadowFast(ShadowAddr, Size, ShadowAlign, Pos), Origin}; 2379 2380 IRBuilder<> IRB(Pos); 2381 FunctionCallee &UnionLoadFn = HasFastLabelsEnabled 2382 ? DFS.DFSanUnionLoadFastLabelsFn 2383 : DFS.DFSanUnionLoadFn; 2384 CallInst *FallbackCall = IRB.CreateCall( 2385 UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)}); 2386 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); 2387 return {FallbackCall, Origin}; 2388 } 2389 2390 std::pair<Value *, Value *> DFSanFunction::loadShadowOrigin(Value *Addr, 2391 uint64_t Size, 2392 Align InstAlignment, 2393 Instruction *Pos) { 2394 Value *PrimitiveShadow, *Origin; 2395 std::tie(PrimitiveShadow, Origin) = 2396 loadShadowOriginSansLoadTracking(Addr, Size, InstAlignment, Pos); 2397 if (DFS.shouldTrackOrigins()) { 2398 if (ClTrackOrigins == 2) { 2399 IRBuilder<> IRB(Pos); 2400 auto *ConstantShadow = dyn_cast<Constant>(PrimitiveShadow); 2401 if (!ConstantShadow || !ConstantShadow->isZeroValue()) 2402 Origin = updateOriginIfTainted(PrimitiveShadow, Origin, IRB); 2403 } 2404 } 2405 return {PrimitiveShadow, Origin}; 2406 } 2407 2408 static AtomicOrdering addAcquireOrdering(AtomicOrdering AO) { 2409 switch (AO) { 2410 case AtomicOrdering::NotAtomic: 2411 return AtomicOrdering::NotAtomic; 2412 case AtomicOrdering::Unordered: 2413 case AtomicOrdering::Monotonic: 2414 case AtomicOrdering::Acquire: 2415 return AtomicOrdering::Acquire; 2416 case AtomicOrdering::Release: 2417 case AtomicOrdering::AcquireRelease: 2418 return AtomicOrdering::AcquireRelease; 2419 case AtomicOrdering::SequentiallyConsistent: 2420 return AtomicOrdering::SequentiallyConsistent; 2421 } 2422 llvm_unreachable("Unknown ordering"); 2423 } 2424 2425 void DFSanVisitor::visitLoadInst(LoadInst &LI) { 2426 auto &DL = LI.getModule()->getDataLayout(); 2427 uint64_t Size = DL.getTypeStoreSize(LI.getType()); 2428 if (Size == 0) { 2429 DFSF.setShadow(&LI, DFSF.DFS.getZeroShadow(&LI)); 2430 DFSF.setOrigin(&LI, DFSF.DFS.ZeroOrigin); 2431 return; 2432 } 2433 2434 // When an 
application load is atomic, increase atomic ordering between 2435 // atomic application loads and stores to ensure happen-before order; load 2436 // shadow data after application data; store zero shadow data before 2437 // application data. This ensure shadow loads return either labels of the 2438 // initial application data or zeros. 2439 if (LI.isAtomic()) 2440 LI.setOrdering(addAcquireOrdering(LI.getOrdering())); 2441 2442 Instruction *Pos = LI.isAtomic() ? LI.getNextNode() : &LI; 2443 std::vector<Value *> Shadows; 2444 std::vector<Value *> Origins; 2445 Value *PrimitiveShadow, *Origin; 2446 std::tie(PrimitiveShadow, Origin) = 2447 DFSF.loadShadowOrigin(LI.getPointerOperand(), Size, LI.getAlign(), Pos); 2448 const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins(); 2449 if (ShouldTrackOrigins) { 2450 Shadows.push_back(PrimitiveShadow); 2451 Origins.push_back(Origin); 2452 } 2453 if (ClCombinePointerLabelsOnLoad) { 2454 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand()); 2455 PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos); 2456 if (ShouldTrackOrigins) { 2457 Shadows.push_back(PtrShadow); 2458 Origins.push_back(DFSF.getOrigin(LI.getPointerOperand())); 2459 } 2460 } 2461 if (!DFSF.DFS.isZeroShadow(PrimitiveShadow)) 2462 DFSF.NonZeroChecks.push_back(PrimitiveShadow); 2463 2464 Value *Shadow = 2465 DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos); 2466 DFSF.setShadow(&LI, Shadow); 2467 2468 if (ShouldTrackOrigins) { 2469 DFSF.setOrigin(&LI, DFSF.combineOrigins(Shadows, Origins, Pos)); 2470 } 2471 2472 if (ClEventCallbacks) { 2473 IRBuilder<> IRB(Pos); 2474 Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr); 2475 IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8}); 2476 } 2477 } 2478 2479 Value *DFSanFunction::updateOriginIfTainted(Value *Shadow, Value *Origin, 2480 IRBuilder<> &IRB) { 2481 assert(DFS.shouldTrackOrigins()); 2482 return IRB.CreateCall(DFS.DFSanChainOriginIfTaintedFn, {Shadow, Origin}); 2483 } 2484 2485 Value *DFSanFunction::updateOrigin(Value *V, IRBuilder<> &IRB) { 2486 if (!DFS.shouldTrackOrigins()) 2487 return V; 2488 return IRB.CreateCall(DFS.DFSanChainOriginFn, V); 2489 } 2490 2491 Value *DFSanFunction::originToIntptr(IRBuilder<> &IRB, Value *Origin) { 2492 const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes; 2493 const DataLayout &DL = F->getParent()->getDataLayout(); 2494 unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy); 2495 if (IntptrSize == OriginSize) 2496 return Origin; 2497 assert(IntptrSize == OriginSize * 2); 2498 Origin = IRB.CreateIntCast(Origin, DFS.IntptrTy, /* isSigned */ false); 2499 return IRB.CreateOr(Origin, IRB.CreateShl(Origin, OriginSize * 8)); 2500 } 2501 2502 void DFSanFunction::paintOrigin(IRBuilder<> &IRB, Value *Origin, 2503 Value *StoreOriginAddr, 2504 uint64_t StoreOriginSize, Align Alignment) { 2505 const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes; 2506 const DataLayout &DL = F->getParent()->getDataLayout(); 2507 const Align IntptrAlignment = DL.getABITypeAlign(DFS.IntptrTy); 2508 unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy); 2509 assert(IntptrAlignment >= MinOriginAlignment); 2510 assert(IntptrSize >= OriginSize); 2511 2512 unsigned Ofs = 0; 2513 Align CurrentAlignment = Alignment; 2514 if (Alignment >= IntptrAlignment && IntptrSize > OriginSize) { 2515 Value *IntptrOrigin = originToIntptr(IRB, Origin); 2516 Value *IntptrStoreOriginPtr = IRB.CreatePointerCast( 2517 StoreOriginAddr, 
PointerType::get(DFS.IntptrTy, 0)); 2518 for (unsigned I = 0; I < StoreOriginSize / IntptrSize; ++I) { 2519 Value *Ptr = 2520 I ? IRB.CreateConstGEP1_32(DFS.IntptrTy, IntptrStoreOriginPtr, I) 2521 : IntptrStoreOriginPtr; 2522 IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment); 2523 Ofs += IntptrSize / OriginSize; 2524 CurrentAlignment = IntptrAlignment; 2525 } 2526 } 2527 2528 for (unsigned I = Ofs; I < (StoreOriginSize + OriginSize - 1) / OriginSize; 2529 ++I) { 2530 Value *GEP = I ? IRB.CreateConstGEP1_32(DFS.OriginTy, StoreOriginAddr, I) 2531 : StoreOriginAddr; 2532 IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment); 2533 CurrentAlignment = MinOriginAlignment; 2534 } 2535 } 2536 2537 Value *DFSanFunction::convertToBool(Value *V, IRBuilder<> &IRB, 2538 const Twine &Name) { 2539 Type *VTy = V->getType(); 2540 assert(VTy->isIntegerTy()); 2541 if (VTy->getIntegerBitWidth() == 1) 2542 // Just converting a bool to a bool, so do nothing. 2543 return V; 2544 return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), Name); 2545 } 2546 2547 void DFSanFunction::storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size, 2548 Value *Shadow, Value *Origin, 2549 Value *StoreOriginAddr, Align InstAlignment) { 2550 // Do not write origins for zero shadows because we do not trace origins for 2551 // untainted sinks. 2552 const Align OriginAlignment = getOriginAlign(InstAlignment); 2553 Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos); 2554 IRBuilder<> IRB(Pos); 2555 if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) { 2556 if (!ConstantShadow->isZeroValue()) 2557 paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size, 2558 OriginAlignment); 2559 return; 2560 } 2561 2562 if (shouldInstrumentWithCall()) { 2563 IRB.CreateCall(DFS.DFSanMaybeStoreOriginFn, 2564 {CollapsedShadow, 2565 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), 2566 ConstantInt::get(DFS.IntptrTy, Size), Origin}); 2567 } else { 2568 Value *Cmp = convertToBool(CollapsedShadow, IRB, "_dfscmp"); 2569 Instruction *CheckTerm = SplitBlockAndInsertIfThen( 2570 Cmp, &*IRB.GetInsertPoint(), false, DFS.OriginStoreWeights, &DT); 2571 IRBuilder<> IRBNew(CheckTerm); 2572 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), StoreOriginAddr, Size, 2573 OriginAlignment); 2574 ++NumOriginStores; 2575 } 2576 } 2577 2578 void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, 2579 Align ShadowAlign, 2580 Instruction *Pos) { 2581 IRBuilder<> IRB(Pos); 2582 IntegerType *ShadowTy = 2583 IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits); 2584 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0); 2585 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); 2586 Value *ExtShadowAddr = 2587 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy)); 2588 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign); 2589 // Do not write origins for 0 shadows because we do not trace origins for 2590 // untainted sinks. 
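  // For example, zeroing the shadow of a 4-byte store with 16-bit labels is a
  // single wide store, roughly:
  //   store i64 0, i64* %shadow_addr, align <shadow align>
  // (a sketch assuming 16-bit labels).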
2591 } 2592 2593 void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size, 2594 Align InstAlignment, 2595 Value *PrimitiveShadow, 2596 Value *Origin, 2597 Instruction *Pos) { 2598 const bool ShouldTrackOrigins = DFS.shouldTrackOrigins() && Origin; 2599 2600 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { 2601 const auto SI = AllocaShadowMap.find(AI); 2602 if (SI != AllocaShadowMap.end()) { 2603 IRBuilder<> IRB(Pos); 2604 IRB.CreateStore(PrimitiveShadow, SI->second); 2605 2606 // Do not write origins for 0 shadows because we do not trace origins for 2607 // untainted sinks. 2608 if (ShouldTrackOrigins && !DFS.isZeroShadow(PrimitiveShadow)) { 2609 const auto OI = AllocaOriginMap.find(AI); 2610 assert(OI != AllocaOriginMap.end() && Origin); 2611 IRB.CreateStore(Origin, OI->second); 2612 } 2613 return; 2614 } 2615 } 2616 2617 const Align ShadowAlign = getShadowAlign(InstAlignment); 2618 if (DFS.isZeroShadow(PrimitiveShadow)) { 2619 storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos); 2620 return; 2621 } 2622 2623 IRBuilder<> IRB(Pos); 2624 Value *ShadowAddr, *OriginAddr; 2625 std::tie(ShadowAddr, OriginAddr) = 2626 DFS.getShadowOriginAddress(Addr, InstAlignment, Pos); 2627 2628 const unsigned ShadowVecSize = 8; 2629 assert(ShadowVecSize * DFS.ShadowWidthBits <= 128 && 2630 "Shadow vector is too large!"); 2631 2632 uint64_t Offset = 0; 2633 uint64_t LeftSize = Size; 2634 if (LeftSize >= ShadowVecSize) { 2635 auto *ShadowVecTy = 2636 FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize); 2637 Value *ShadowVec = UndefValue::get(ShadowVecTy); 2638 for (unsigned I = 0; I != ShadowVecSize; ++I) { 2639 ShadowVec = IRB.CreateInsertElement( 2640 ShadowVec, PrimitiveShadow, 2641 ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I)); 2642 } 2643 Value *ShadowVecAddr = 2644 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy)); 2645 do { 2646 Value *CurShadowVecAddr = 2647 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset); 2648 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign); 2649 LeftSize -= ShadowVecSize; 2650 ++Offset; 2651 } while (LeftSize >= ShadowVecSize); 2652 Offset *= ShadowVecSize; 2653 } 2654 while (LeftSize > 0) { 2655 Value *CurShadowAddr = 2656 IRB.CreateConstGEP1_32(DFS.PrimitiveShadowTy, ShadowAddr, Offset); 2657 IRB.CreateAlignedStore(PrimitiveShadow, CurShadowAddr, ShadowAlign); 2658 --LeftSize; 2659 ++Offset; 2660 } 2661 2662 if (ShouldTrackOrigins) { 2663 storeOrigin(Pos, Addr, Size, PrimitiveShadow, Origin, OriginAddr, 2664 InstAlignment); 2665 } 2666 } 2667 2668 static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) { 2669 switch (AO) { 2670 case AtomicOrdering::NotAtomic: 2671 return AtomicOrdering::NotAtomic; 2672 case AtomicOrdering::Unordered: 2673 case AtomicOrdering::Monotonic: 2674 case AtomicOrdering::Release: 2675 return AtomicOrdering::Release; 2676 case AtomicOrdering::Acquire: 2677 case AtomicOrdering::AcquireRelease: 2678 return AtomicOrdering::AcquireRelease; 2679 case AtomicOrdering::SequentiallyConsistent: 2680 return AtomicOrdering::SequentiallyConsistent; 2681 } 2682 llvm_unreachable("Unknown ordering"); 2683 } 2684 2685 void DFSanVisitor::visitStoreInst(StoreInst &SI) { 2686 auto &DL = SI.getModule()->getDataLayout(); 2687 Value *Val = SI.getValueOperand(); 2688 uint64_t Size = DL.getTypeStoreSize(Val->getType()); 2689 if (Size == 0) 2690 return; 2691 2692 // When an application store is atomic, increase atomic ordering between 2693 // atomic application loads and stores to ensure happen-before 
order; load 2694 // shadow data after application data; store zero shadow data before 2695 // application data. This ensure shadow loads return either labels of the 2696 // initial application data or zeros. 2697 if (SI.isAtomic()) 2698 SI.setOrdering(addReleaseOrdering(SI.getOrdering())); 2699 2700 const bool ShouldTrackOrigins = 2701 DFSF.DFS.shouldTrackOrigins() && !SI.isAtomic(); 2702 std::vector<Value *> Shadows; 2703 std::vector<Value *> Origins; 2704 2705 Value *Shadow = 2706 SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val); 2707 2708 if (ShouldTrackOrigins) { 2709 Shadows.push_back(Shadow); 2710 Origins.push_back(DFSF.getOrigin(Val)); 2711 } 2712 2713 Value *PrimitiveShadow; 2714 if (ClCombinePointerLabelsOnStore) { 2715 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand()); 2716 if (ShouldTrackOrigins) { 2717 Shadows.push_back(PtrShadow); 2718 Origins.push_back(DFSF.getOrigin(SI.getPointerOperand())); 2719 } 2720 PrimitiveShadow = DFSF.combineShadows(Shadow, PtrShadow, &SI); 2721 } else { 2722 PrimitiveShadow = DFSF.collapseToPrimitiveShadow(Shadow, &SI); 2723 } 2724 Value *Origin = nullptr; 2725 if (ShouldTrackOrigins) 2726 Origin = DFSF.combineOrigins(Shadows, Origins, &SI); 2727 DFSF.storePrimitiveShadowOrigin(SI.getPointerOperand(), Size, SI.getAlign(), 2728 PrimitiveShadow, Origin, &SI); 2729 if (ClEventCallbacks) { 2730 IRBuilder<> IRB(&SI); 2731 Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr); 2732 IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8}); 2733 } 2734 } 2735 2736 void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) { 2737 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I)); 2738 2739 Value *Val = I.getOperand(1); 2740 const auto &DL = I.getModule()->getDataLayout(); 2741 uint64_t Size = DL.getTypeStoreSize(Val->getType()); 2742 if (Size == 0) 2743 return; 2744 2745 // Conservatively set data at stored addresses and return with zero shadow to 2746 // prevent shadow data races. 2747 IRBuilder<> IRB(&I); 2748 Value *Addr = I.getOperand(0); 2749 const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment); 2750 DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, &I); 2751 DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I)); 2752 DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin); 2753 } 2754 2755 void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) { 2756 visitCASOrRMW(I.getAlign(), I); 2757 // TODO: The ordering change follows MSan. It is possible not to change 2758 // ordering because we always set and use 0 shadows. 2759 I.setOrdering(addReleaseOrdering(I.getOrdering())); 2760 } 2761 2762 void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { 2763 visitCASOrRMW(I.getAlign(), I); 2764 // TODO: The ordering change follows MSan. It is possible not to change 2765 // ordering because we always set and use 0 shadows. 
2766 I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering())); 2767 } 2768 2769 void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) { 2770 visitInstOperands(UO); 2771 } 2772 2773 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) { 2774 visitInstOperands(BO); 2775 } 2776 2777 void DFSanVisitor::visitCastInst(CastInst &CI) { visitInstOperands(CI); } 2778 2779 void DFSanVisitor::visitCmpInst(CmpInst &CI) { 2780 visitInstOperands(CI); 2781 if (ClEventCallbacks) { 2782 IRBuilder<> IRB(&CI); 2783 Value *CombinedShadow = DFSF.getShadow(&CI); 2784 IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow); 2785 } 2786 } 2787 2788 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) { 2789 if (ClCombineOffsetLabelsOnGEP) { 2790 visitInstOperands(GEPI); 2791 return; 2792 } 2793 2794 // Only propagate shadow/origin of base pointer value but ignore those of 2795 // offset operands. 2796 Value *BasePointer = GEPI.getPointerOperand(); 2797 DFSF.setShadow(&GEPI, DFSF.getShadow(BasePointer)); 2798 if (DFSF.DFS.shouldTrackOrigins()) 2799 DFSF.setOrigin(&GEPI, DFSF.getOrigin(BasePointer)); 2800 } 2801 2802 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) { 2803 visitInstOperands(I); 2804 } 2805 2806 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) { 2807 visitInstOperands(I); 2808 } 2809 2810 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) { 2811 visitInstOperands(I); 2812 } 2813 2814 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) { 2815 if (!DFSF.DFS.shouldTrackFieldsAndIndices()) { 2816 visitInstOperands(I); 2817 return; 2818 } 2819 2820 IRBuilder<> IRB(&I); 2821 Value *Agg = I.getAggregateOperand(); 2822 Value *AggShadow = DFSF.getShadow(Agg); 2823 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); 2824 DFSF.setShadow(&I, ResShadow); 2825 visitInstOperandOrigins(I); 2826 } 2827 2828 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) { 2829 if (!DFSF.DFS.shouldTrackFieldsAndIndices()) { 2830 visitInstOperands(I); 2831 return; 2832 } 2833 2834 IRBuilder<> IRB(&I); 2835 Value *AggShadow = DFSF.getShadow(I.getAggregateOperand()); 2836 Value *InsShadow = DFSF.getShadow(I.getInsertedValueOperand()); 2837 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); 2838 DFSF.setShadow(&I, Res); 2839 visitInstOperandOrigins(I); 2840 } 2841 2842 void DFSanVisitor::visitAllocaInst(AllocaInst &I) { 2843 bool AllLoadsStores = true; 2844 for (User *U : I.users()) { 2845 if (isa<LoadInst>(U)) 2846 continue; 2847 2848 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 2849 if (SI->getPointerOperand() == &I) 2850 continue; 2851 } 2852 2853 AllLoadsStores = false; 2854 break; 2855 } 2856 if (AllLoadsStores) { 2857 IRBuilder<> IRB(&I); 2858 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.PrimitiveShadowTy); 2859 if (DFSF.DFS.shouldTrackOrigins()) { 2860 DFSF.AllocaOriginMap[&I] = 2861 IRB.CreateAlloca(DFSF.DFS.OriginTy, nullptr, "_dfsa"); 2862 } 2863 } 2864 DFSF.setShadow(&I, DFSF.DFS.ZeroPrimitiveShadow); 2865 DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin); 2866 } 2867 2868 void DFSanVisitor::visitSelectInst(SelectInst &I) { 2869 Value *CondShadow = DFSF.getShadow(I.getCondition()); 2870 Value *TrueShadow = DFSF.getShadow(I.getTrueValue()); 2871 Value *FalseShadow = DFSF.getShadow(I.getFalseValue()); 2872 Value *ShadowSel = nullptr; 2873 const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins(); 2874 std::vector<Value *> Shadows; 2875 std::vector<Value *> Origins; 2876 Value 
*TrueOrigin =
2877       ShouldTrackOrigins ? DFSF.getOrigin(I.getTrueValue()) : nullptr;
2878   Value *FalseOrigin =
2879       ShouldTrackOrigins ? DFSF.getOrigin(I.getFalseValue()) : nullptr;
2880
2881   if (isa<VectorType>(I.getCondition()->getType())) {
2882     ShadowSel = DFSF.combineShadowsThenConvert(I.getType(), TrueShadow,
2883                                                FalseShadow, &I);
2884     if (ShouldTrackOrigins) {
2885       Shadows.push_back(TrueShadow);
2886       Shadows.push_back(FalseShadow);
2887       Origins.push_back(TrueOrigin);
2888       Origins.push_back(FalseOrigin);
2889     }
2890   } else {
2891     if (TrueShadow == FalseShadow) {
2892       ShadowSel = TrueShadow;
2893       if (ShouldTrackOrigins) {
2894         Shadows.push_back(TrueShadow);
2895         Origins.push_back(TrueOrigin);
2896       }
2897     } else {
2898       ShadowSel =
2899           SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
2900       if (ShouldTrackOrigins) {
2901         Shadows.push_back(ShadowSel);
2902         Origins.push_back(SelectInst::Create(I.getCondition(), TrueOrigin,
2903                                              FalseOrigin, "", &I));
2904       }
2905     }
2906   }
2907   DFSF.setShadow(&I, ClTrackSelectControlFlow
2908                          ? DFSF.combineShadowsThenConvert(
2909                                I.getType(), CondShadow, ShadowSel, &I)
2910                          : ShadowSel);
2911   if (ShouldTrackOrigins) {
2912     if (ClTrackSelectControlFlow) {
2913       Shadows.push_back(CondShadow);
2914       Origins.push_back(DFSF.getOrigin(I.getCondition()));
2915     }
2916     DFSF.setOrigin(&I, DFSF.combineOrigins(Shadows, Origins, &I));
2917   }
2918 }
2919
2920 void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
2921   IRBuilder<> IRB(&I);
2922   Value *ValShadow = DFSF.getShadow(I.getValue());
2923   Value *ValOrigin = DFSF.DFS.shouldTrackOrigins()
2924                          ? DFSF.getOrigin(I.getValue())
2925                          : DFSF.DFS.ZeroOrigin;
2926   IRB.CreateCall(
2927       DFSF.DFS.DFSanSetLabelFn,
2928       {ValShadow, ValOrigin,
2929        IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*DFSF.DFS.Ctx)),
2930        IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
2931 }
2932
2933 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
2934   IRBuilder<> IRB(&I);
2935
2936   // CopyOrMoveOrigin transfers origins by referring to their shadows. So we
2937   // need to move origins before moving shadows.
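  // The shadow copy below reuses the original transfer intrinsic with scaled
  // operands; for memcpy(d, s, n) with 2-byte shadows it emits roughly:
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %shadow_d, i8* %shadow_s,
  //                                        i64 <n * 2>, i1 <volatile>)
  // (a sketch; the length multiply is emitted as IR below).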
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);

  // CopyOrMoveOrigin transfers origins by referring to their shadows. So we
  // need to move origins before moving shadows.
  if (DFSF.DFS.shouldTrackOrigins()) {
    IRB.CreateCall(
        DFSF.DFS.DFSanMemOriginTransferFn,
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(2), DFSF.DFS.IntptrTy, false)});
  }

  Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  Value *LenShadow =
      IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
                                                    DFSF.DFS.ShadowWidthBytes));
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  auto *MTI = cast<MemTransferInst>(
      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
  if (ClPreserveAlignment) {
    MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
    MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
  } else {
    MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
    MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
  }
  if (ClEventCallbacks) {
    IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
                   {RawDestShadow,
                    IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
  }
}

void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      unsigned Size =
          getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
      if (Size <= RetvalTLSSize) {
        // If the size overflows, store nothing. At the call site, oversized
        // return shadows are set to zero.
        IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB),
                               ShadowTLSAlignment);
      }
      if (DFSF.DFS.shouldTrackOrigins()) {
        Value *O = DFSF.getOrigin(RI.getReturnValue());
        IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
      }
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}
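
// Append the shadow arguments for a call to a custom (__dfsw_ / __dfso_)
// wrapper: one primitive shadow per fixed parameter, then, for variadic
// calls, a pointer to a stack array of the variadic arguments' shadows, and
// finally, for non-void callees, a pointer to a slot through which the
// wrapper reports the return value's shadow. For example (an illustrative
// sketch assuming the two-byte shadow encoding), a custom call to
// `int f(int)` becomes roughly:
//
//   %r = call i32 @__dfsw_f(i32 %x, i16 zeroext %x.shadow, i16* %labelreturn)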
void DFSanVisitor::addShadowArguments(Function &F, CallBase &CB,
                                      std::vector<Value *> &Args,
                                      IRBuilder<> &IRB) {
  FunctionType *FT = F.getFunctionType();

  auto *I = CB.arg_begin();

  // Add non-variable argument shadows.
  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
    Args.push_back(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB));

  // Add variable argument shadows.
  if (FT->isVarArg()) {
    auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
                                     CB.arg_size() - FT->getNumParams());
    auto *LabelVAAlloca =
        new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
                       "labelva", &DFSF.F->getEntryBlock().front());

    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
      auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
      IRB.CreateStore(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB),
                      LabelVAPtr);
    }

    Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
  }

  // Add the return value shadow.
  if (!FT->getReturnType()->isVoidTy()) {
    if (!DFSF.LabelReturnAlloca) {
      DFSF.LabelReturnAlloca = new AllocaInst(
          DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
          "labelreturn", &DFSF.F->getEntryBlock().front());
    }
    Args.push_back(DFSF.LabelReturnAlloca);
  }
}

void DFSanVisitor::addOriginArguments(Function &F, CallBase &CB,
                                      std::vector<Value *> &Args,
                                      IRBuilder<> &IRB) {
  FunctionType *FT = F.getFunctionType();

  auto *I = CB.arg_begin();

  // Add non-variable argument origins.
  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
    Args.push_back(DFSF.getOrigin(*I));

  // Add variable argument origins.
  if (FT->isVarArg()) {
    auto *OriginVATy =
        ArrayType::get(DFSF.DFS.OriginTy, CB.arg_size() - FT->getNumParams());
    auto *OriginVAAlloca =
        new AllocaInst(OriginVATy, getDataLayout().getAllocaAddrSpace(),
                       "originva", &DFSF.F->getEntryBlock().front());

    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
      auto *OriginVAPtr = IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, N);
      IRB.CreateStore(DFSF.getOrigin(*I), OriginVAPtr);
    }

    Args.push_back(IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, 0));
  }

  // Add the return value origin.
  if (!FT->getReturnType()->isVoidTy()) {
    if (!DFSF.OriginReturnAlloca) {
      DFSF.OriginReturnAlloca = new AllocaInst(
          DFSF.DFS.OriginTy, getDataLayout().getAllocaAddrSpace(),
          "originreturn", &DFSF.F->getEntryBlock().front());
    }
    Args.push_back(DFSF.OriginReturnAlloca);
  }
}

bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
  IRBuilder<> IRB(&CB);
  switch (DFSF.DFS.getWrapperKind(&F)) {
  case DataFlowSanitizer::WK_Warning:
    CB.setCalledFunction(&F);
    IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                   IRB.CreateGlobalStringPtr(F.getName()));
    DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
    DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
    return true;
  case DataFlowSanitizer::WK_Discard:
    CB.setCalledFunction(&F);
    DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
    DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
    return true;
  case DataFlowSanitizer::WK_Functional:
    CB.setCalledFunction(&F);
    visitInstOperands(CB);
    return true;
  case DataFlowSanitizer::WK_Custom:
    // Don't try to handle invokes of custom functions, it's too complicated.
    // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
    // wrapper.
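    // Returning false here makes the caller (visitCallBase) fall back to the
    // generic call instrumentation.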
    CallInst *CI = dyn_cast<CallInst>(&CB);
    if (!CI)
      return false;

    const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
    FunctionType *FT = F.getFunctionType();
    TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
    std::string CustomFName = ShouldTrackOrigins ? "__dfso_" : "__dfsw_";
    CustomFName += F.getName();
    FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
        CustomFName, CustomFn.TransformedType);
    if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
      CustomFn->copyAttributesFrom(&F);

      // Custom functions returning non-void will write to the return label.
      if (!FT->getReturnType()->isVoidTy()) {
        CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                   DFSF.DFS.ReadOnlyNoneAttrs);
      }
    }

    std::vector<Value *> Args;

    // Add non-variable arguments.
    auto *I = CB.arg_begin();
    for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {
      Type *T = (*I)->getType();
      FunctionType *ParamFT;
      if (isa<PointerType>(T) &&
          (ParamFT = dyn_cast<FunctionType>(T->getPointerElementType()))) {
        std::string TName = "dfst";
        TName += utostr(FT->getNumParams() - N);
        TName += "$";
        TName += F.getName();
        Constant *Trampoline =
            DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
        Args.push_back(Trampoline);
        Args.push_back(
            IRB.CreateBitCast(*I, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
      } else {
        Args.push_back(*I);
      }
    }

    // Add shadow arguments.
    const unsigned ShadowArgStart = Args.size();
    addShadowArguments(F, CB, Args, IRB);

    // Add origin arguments.
    const unsigned OriginArgStart = Args.size();
    if (ShouldTrackOrigins)
      addOriginArguments(F, CB, Args, IRB);

    // Add variable arguments.
    append_range(Args, drop_begin(CB.args(), FT->getNumParams()));

    CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
    CustomCI->setCallingConv(CI->getCallingConv());
    CustomCI->setAttributes(transformFunctionAttributes(
        CustomFn, CI->getContext(), CI->getAttributes()));

    // Update the parameter attributes of the custom call instruction to
    // zero extend the shadow parameters. This is required for targets
    // which consider PrimitiveShadowTy an illegal type.
    for (unsigned N = 0; N < FT->getNumParams(); N++) {
      const unsigned ArgNo = ShadowArgStart + N;
      if (CustomCI->getArgOperand(ArgNo)->getType() ==
          DFSF.DFS.PrimitiveShadowTy)
        CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
      if (ShouldTrackOrigins) {
        const unsigned OriginArgNo = OriginArgStart + N;
        if (CustomCI->getArgOperand(OriginArgNo)->getType() ==
            DFSF.DFS.OriginTy)
          CustomCI->addParamAttr(OriginArgNo, Attribute::ZExt);
      }
    }

    // Load the return value shadow and origin.
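    // The custom wrapper reports the return shadow through LabelReturnAlloca
    // (and the origin through OriginReturnAlloca), which were appended to the
    // argument list above; read them back and expand the primitive shadow to
    // the shadow type of the original return value.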
    if (!FT->getReturnType()->isVoidTy()) {
      LoadInst *LabelLoad =
          IRB.CreateLoad(DFSF.DFS.PrimitiveShadowTy, DFSF.LabelReturnAlloca);
      DFSF.setShadow(CustomCI, DFSF.expandFromPrimitiveShadow(
                                   FT->getReturnType(), LabelLoad, &CB));
      if (ShouldTrackOrigins) {
        LoadInst *OriginLoad =
            IRB.CreateLoad(DFSF.DFS.OriginTy, DFSF.OriginReturnAlloca);
        DFSF.setOrigin(CustomCI, OriginLoad);
      }
    }

    CI->replaceAllUsesWith(CustomCI);
    CI->eraseFromParent();
    return true;
  }
  return false;
}

void DFSanVisitor::visitCallBase(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
    visitInstOperands(CB);
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
    return;

  DenseMap<Value *, Function *>::iterator UnwrappedFnIt =
      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
  if (UnwrappedFnIt != DFSF.DFS.UnwrappedFnMap.end())
    if (visitWrappedCallBase(*UnwrappedFnIt->second, CB))
      return;

  IRBuilder<> IRB(&CB);

  const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
  FunctionType *FT = CB.getFunctionType();
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    // Store argument shadows.
    unsigned ArgOffset = 0;
    const DataLayout &DL = getDataLayout();
    for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
      if (ShouldTrackOrigins) {
        // Ignore overflowed origins.
        Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
        if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
            !DFSF.DFS.isZeroShadow(ArgShadow))
          IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
                          DFSF.getArgOriginTLS(I, IRB));
      }

      unsigned Size =
          DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
      // Stop storing if the accumulated argument size overflows. Inside the
      // callee, arguments past the overflow point have zero shadow values.
      if (ArgOffset + Size > ArgTLSSize)
        break;
      IRB.CreateAlignedStore(
          DFSF.getShadow(CB.getArgOperand(I)),
          DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
          ShadowTLSAlignment);
      ArgOffset += alignTo(Size, ShadowTLSAlignment);
    }
  }
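
  // Find where the return value's shadow (and origin) should be loaded:
  // immediately after a call, or at the start of an invoke's normal
  // destination, splitting the edge first if that block has multiple
  // predecessors.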
  Instruction *Next = nullptr;
  if (!CB.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CB.getIterator() != CB.getParent()->end());
      Next = CB.getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      // Load the return value shadow.
      IRBuilder<> NextIRB(Next);
      const DataLayout &DL = getDataLayout();
      unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
      if (Size > RetvalTLSSize) {
        // Set overflowed return shadow to be zero.
        DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
      } else {
        LoadInst *LI = NextIRB.CreateAlignedLoad(
            DFSF.DFS.getShadowTy(&CB),
            DFSF.getRetvalTLS(CB.getType(), NextIRB), ShadowTLSAlignment,
            "_dfsret");
        DFSF.SkipInsts.insert(LI);
        DFSF.setShadow(&CB, LI);
        DFSF.NonZeroChecks.push_back(LI);
      }

      if (ShouldTrackOrigins) {
        LoadInst *LI = NextIRB.CreateLoad(
            DFSF.DFS.OriginTy, DFSF.getRetvalOriginTLS(), "_dfsret_o");
        DFSF.SkipInsts.insert(LI);
        DFSF.setOrigin(&CB, LI);
      }
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));

    const unsigned NumParams = FT->getNumParams();

    // Copy original arguments.
    auto *ArgIt = CB.arg_begin(), *ArgEnd = CB.arg_end();
    std::vector<Value *> Args(NumParams);
    std::copy_n(ArgIt, NumParams, Args.begin());

    // Add shadow arguments by transforming original arguments.
    std::generate_n(std::back_inserter(Args), NumParams,
                    [&]() { return DFSF.getShadow(*ArgIt++); });

    if (FT->isVarArg()) {
      unsigned VarArgSize = CB.arg_size() - NumParams;
      ArrayType *VarArgArrayTy =
          ArrayType::get(DFSF.DFS.PrimitiveShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));

      // Copy remaining var args.
      unsigned GepIndex = 0;
      std::for_each(ArgIt, ArgEnd, [&](Value *Arg) {
        IRB.CreateStore(
            DFSF.getShadow(Arg),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, GepIndex++));
        Args.push_back(Arg);
      });
    }

    CallBase *NewCB;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    } else {
      NewCB = IRB.CreateCall(NewFT, Func, Args);
    }
    NewCB->setCallingConv(CB.getCallingConv());
    NewCB->setAttributes(CB.getAttributes().removeAttributes(
        *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
        AttributeFuncs::typeIncompatible(NewCB->getType())));

    if (Next) {
      ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
      DFSF.SkipInsts.insert(ExVal);
      ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
      DFSF.SkipInsts.insert(ExShadow);
      DFSF.setShadow(ExVal, ExShadow);
      DFSF.NonZeroChecks.push_back(ExShadow);

      CB.replaceAllUsesWith(ExVal);
    }

    CB.eraseFromParent();
  }
}
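
// Shadow (and origin) phis mirror the original phi, but their real incoming
// values may not have been computed yet, so they start out with undef
// incoming values and are recorded in PHIFixups to be patched up once the
// whole function has been instrumented.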
void DFSanVisitor::visitPHINode(PHINode &PN) {
  Type *ShadowTy = DFSF.DFS.getShadowTy(&PN);
  PHINode *ShadowPN =
      PHINode::Create(ShadowTy, PN.getNumIncomingValues(), "", &PN);

  // Give the shadow phi node valid predecessors to fool SplitEdge into
  // working.
  Value *UndefShadow = UndefValue::get(ShadowTy);
  for (BasicBlock *BB : PN.blocks())
    ShadowPN->addIncoming(UndefShadow, BB);

  DFSF.setShadow(&PN, ShadowPN);

  PHINode *OriginPN = nullptr;
  if (DFSF.DFS.shouldTrackOrigins()) {
    OriginPN =
        PHINode::Create(DFSF.DFS.OriginTy, PN.getNumIncomingValues(), "", &PN);
    Value *UndefOrigin = UndefValue::get(DFSF.DFS.OriginTy);
    for (BasicBlock *BB : PN.blocks())
      OriginPN->addIncoming(UndefOrigin, BB);
    DFSF.setOrigin(&PN, OriginPN);
  }

  DFSF.PHIFixups.push_back({&PN, ShadowPN, OriginPN});
}

namespace {
class DataFlowSanitizerLegacyPass : public ModulePass {
private:
  std::vector<std::string> ABIListFiles;

public:
  static char ID;

  DataFlowSanitizerLegacyPass(
      const std::vector<std::string> &ABIListFiles =
          std::vector<std::string>())
      : ModulePass(ID), ABIListFiles(ABIListFiles) {}

  bool runOnModule(Module &M) override {
    return DataFlowSanitizer(ABIListFiles).runImpl(M);
  }
};
} // namespace

char DataFlowSanitizerLegacyPass::ID;

INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)

ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
    const std::vector<std::string> &ABIListFiles) {
  return new DataFlowSanitizerLegacyPass(ABIListFiles);
}

PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
                                             ModuleAnalysisManager &AM) {
  if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}