1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow 11 /// analysis. 12 /// 13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific 14 /// class of bugs on its own. Instead, it provides a generic dynamic data flow 15 /// analysis framework to be used by clients to help detect application-specific 16 /// issues within their own code. 17 /// 18 /// The analysis is based on automatic propagation of data flow labels (also 19 /// known as taint labels) through a program as it performs computation. Each 20 /// byte of application memory is backed by two bytes of shadow memory which 21 /// hold the label. On Linux/x86_64, memory is laid out as follows: 22 /// 23 /// +--------------------+ 0x800000000000 (top of memory) 24 /// | application memory | 25 /// +--------------------+ 0x700000008000 (kAppAddr) 26 /// | | 27 /// | unused | 28 /// | | 29 /// +--------------------+ 0x200200000000 (kUnusedAddr) 30 /// | union table | 31 /// +--------------------+ 0x200000000000 (kUnionTableAddr) 32 /// | shadow memory | 33 /// +--------------------+ 0x000000010000 (kShadowAddr) 34 /// | reserved by kernel | 35 /// +--------------------+ 0x000000000000 36 /// 37 /// To derive a shadow memory address from an application memory address, 38 /// bits 44-46 are cleared to bring the address into the range 39 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to 40 /// account for the double byte representation of shadow labels and move the 41 /// address into the shadow memory range. 
See the function 42 /// DataFlowSanitizer::getShadowAddress below. 43 /// 44 /// For more information, please refer to the design document: 45 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html 46 // 47 //===----------------------------------------------------------------------===// 48 49 #include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h" 50 #include "llvm/ADT/DenseMap.h" 51 #include "llvm/ADT/DenseSet.h" 52 #include "llvm/ADT/DepthFirstIterator.h" 53 #include "llvm/ADT/None.h" 54 #include "llvm/ADT/SmallPtrSet.h" 55 #include "llvm/ADT/SmallVector.h" 56 #include "llvm/ADT/StringExtras.h" 57 #include "llvm/ADT/StringRef.h" 58 #include "llvm/ADT/Triple.h" 59 #include "llvm/Analysis/ValueTracking.h" 60 #include "llvm/IR/Argument.h" 61 #include "llvm/IR/Attributes.h" 62 #include "llvm/IR/BasicBlock.h" 63 #include "llvm/IR/Constant.h" 64 #include "llvm/IR/Constants.h" 65 #include "llvm/IR/DataLayout.h" 66 #include "llvm/IR/DerivedTypes.h" 67 #include "llvm/IR/Dominators.h" 68 #include "llvm/IR/Function.h" 69 #include "llvm/IR/GlobalAlias.h" 70 #include "llvm/IR/GlobalValue.h" 71 #include "llvm/IR/GlobalVariable.h" 72 #include "llvm/IR/IRBuilder.h" 73 #include "llvm/IR/InlineAsm.h" 74 #include "llvm/IR/InstVisitor.h" 75 #include "llvm/IR/InstrTypes.h" 76 #include "llvm/IR/Instruction.h" 77 #include "llvm/IR/Instructions.h" 78 #include "llvm/IR/IntrinsicInst.h" 79 #include "llvm/IR/LLVMContext.h" 80 #include "llvm/IR/MDBuilder.h" 81 #include "llvm/IR/Module.h" 82 #include "llvm/IR/PassManager.h" 83 #include "llvm/IR/Type.h" 84 #include "llvm/IR/User.h" 85 #include "llvm/IR/Value.h" 86 #include "llvm/InitializePasses.h" 87 #include "llvm/Pass.h" 88 #include "llvm/Support/Casting.h" 89 #include "llvm/Support/CommandLine.h" 90 #include "llvm/Support/ErrorHandling.h" 91 #include "llvm/Support/SpecialCaseList.h" 92 #include "llvm/Support/VirtualFileSystem.h" 93 #include "llvm/Transforms/Instrumentation.h" 94 #include 
"llvm/Transforms/Utils/BasicBlockUtils.h" 95 #include "llvm/Transforms/Utils/Local.h" 96 #include <algorithm> 97 #include <cassert> 98 #include <cstddef> 99 #include <cstdint> 100 #include <iterator> 101 #include <memory> 102 #include <set> 103 #include <string> 104 #include <utility> 105 #include <vector> 106 107 using namespace llvm; 108 109 // External symbol to be used when generating the shadow address for 110 // architectures with multiple VMAs. Instead of using a constant integer 111 // the runtime will set the external mask based on the VMA range. 112 static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask"; 113 114 // The -dfsan-preserve-alignment flag controls whether this pass assumes that 115 // alignment requirements provided by the input IR are correct. For example, 116 // if the input IR contains a load with alignment 8, this flag will cause 117 // the shadow load to have alignment 16. This flag is disabled by default as 118 // we have unfortunately encountered too much code (including Clang itself; 119 // see PR14291) which performs misaligned access. 120 static cl::opt<bool> ClPreserveAlignment( 121 "dfsan-preserve-alignment", 122 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden, 123 cl::init(false)); 124 125 // The ABI list files control how shadow parameters are passed. The pass treats 126 // every function labelled "uninstrumented" in the ABI list file as conforming 127 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains 128 // additional annotations for those functions, a call to one of those functions 129 // will produce a warning message, as the labelling behaviour of the function is 130 // unknown. The other supported annotations are "functional" and "discard", 131 // which are described below under DataFlowSanitizer::WrapperKind. 
132 static cl::list<std::string> ClABIListFiles( 133 "dfsan-abilist", 134 cl::desc("File listing native ABI functions and how the pass treats them"), 135 cl::Hidden); 136 137 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented 138 // functions (see DataFlowSanitizer::InstrumentedABI below). 139 static cl::opt<bool> ClArgsABI( 140 "dfsan-args-abi", 141 cl::desc("Use the argument ABI rather than the TLS ABI"), 142 cl::Hidden); 143 144 // Controls whether the pass includes or ignores the labels of pointers in load 145 // instructions. 146 static cl::opt<bool> ClCombinePointerLabelsOnLoad( 147 "dfsan-combine-pointer-labels-on-load", 148 cl::desc("Combine the label of the pointer with the label of the data when " 149 "loading from memory."), 150 cl::Hidden, cl::init(true)); 151 152 // Controls whether the pass includes or ignores the labels of pointers in 153 // stores instructions. 154 static cl::opt<bool> ClCombinePointerLabelsOnStore( 155 "dfsan-combine-pointer-labels-on-store", 156 cl::desc("Combine the label of the pointer with the label of the data when " 157 "storing in memory."), 158 cl::Hidden, cl::init(false)); 159 160 static cl::opt<bool> ClDebugNonzeroLabels( 161 "dfsan-debug-nonzero-labels", 162 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, " 163 "load or return with a nonzero label"), 164 cl::Hidden); 165 166 // Experimental feature that inserts callbacks for certain data events. 167 // Currently callbacks are only inserted for loads, stores, memory transfers 168 // (i.e. memcpy and memmove), and comparisons. 
169 // 170 // If this flag is set to true, the user must provide definitions for the 171 // following callback functions: 172 // void __dfsan_load_callback(dfsan_label Label); 173 // void __dfsan_store_callback(dfsan_label Label); 174 // void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len); 175 // void __dfsan_cmp_callback(dfsan_label CombinedLabel); 176 static cl::opt<bool> ClEventCallbacks( 177 "dfsan-event-callbacks", 178 cl::desc("Insert calls to __dfsan_*_callback functions on data events."), 179 cl::Hidden, cl::init(false)); 180 181 // Use a distinct bit for each base label, enabling faster unions with less 182 // instrumentation. Limits the max number of base labels to 16. 183 static cl::opt<bool> ClFast16Labels( 184 "dfsan-fast-16-labels", 185 cl::desc("Use more efficient instrumentation, limiting the number of " 186 "labels to 16."), 187 cl::Hidden, cl::init(false)); 188 189 // Controls whether the pass tracks the control flow of select instructions. 190 static cl::opt<bool> ClTrackSelectControlFlow( 191 "dfsan-track-select-control-flow", 192 cl::desc("Propagate labels from condition values of select instructions " 193 "to results."), 194 cl::Hidden, cl::init(true)); 195 196 static StringRef GetGlobalTypeString(const GlobalValue &G) { 197 // Types of GlobalVariables are always pointer types. 198 Type *GType = G.getValueType(); 199 // For now we support excluding struct types only. 200 if (StructType *SGType = dyn_cast<StructType>(GType)) { 201 if (!SGType->isLiteral()) 202 return SGType->getName(); 203 } 204 return "<unknown type>"; 205 } 206 207 namespace { 208 209 class DFSanABIList { 210 std::unique_ptr<SpecialCaseList> SCL; 211 212 public: 213 DFSanABIList() = default; 214 215 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); } 216 217 /// Returns whether either this function or its source file are listed in the 218 /// given category. 
219 bool isIn(const Function &F, StringRef Category) const { 220 return isIn(*F.getParent(), Category) || 221 SCL->inSection("dataflow", "fun", F.getName(), Category); 222 } 223 224 /// Returns whether this global alias is listed in the given category. 225 /// 226 /// If GA aliases a function, the alias's name is matched as a function name 227 /// would be. Similarly, aliases of globals are matched like globals. 228 bool isIn(const GlobalAlias &GA, StringRef Category) const { 229 if (isIn(*GA.getParent(), Category)) 230 return true; 231 232 if (isa<FunctionType>(GA.getValueType())) 233 return SCL->inSection("dataflow", "fun", GA.getName(), Category); 234 235 return SCL->inSection("dataflow", "global", GA.getName(), Category) || 236 SCL->inSection("dataflow", "type", GetGlobalTypeString(GA), 237 Category); 238 } 239 240 /// Returns whether this module is listed in the given category. 241 bool isIn(const Module &M, StringRef Category) const { 242 return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category); 243 } 244 }; 245 246 /// TransformedFunction is used to express the result of transforming one 247 /// function type into another. This struct is immutable. It holds metadata 248 /// useful for updating calls of the old function to the new type. 249 struct TransformedFunction { 250 TransformedFunction(FunctionType* OriginalType, 251 FunctionType* TransformedType, 252 std::vector<unsigned> ArgumentIndexMapping) 253 : OriginalType(OriginalType), 254 TransformedType(TransformedType), 255 ArgumentIndexMapping(ArgumentIndexMapping) {} 256 257 // Disallow copies. 258 TransformedFunction(const TransformedFunction&) = delete; 259 TransformedFunction& operator=(const TransformedFunction&) = delete; 260 261 // Allow moves. 262 TransformedFunction(TransformedFunction&&) = default; 263 TransformedFunction& operator=(TransformedFunction&&) = default; 264 265 /// Type of the function before the transformation. 
266 FunctionType *OriginalType; 267 268 /// Type of the function after the transformation. 269 FunctionType *TransformedType; 270 271 /// Transforming a function may change the position of arguments. This 272 /// member records the mapping from each argument's old position to its new 273 /// position. Argument positions are zero-indexed. If the transformation 274 /// from F to F' made the first argument of F into the third argument of F', 275 /// then ArgumentIndexMapping[0] will equal 2. 276 std::vector<unsigned> ArgumentIndexMapping; 277 }; 278 279 /// Given function attributes from a call site for the original function, 280 /// return function attributes appropriate for a call to the transformed 281 /// function. 282 AttributeList TransformFunctionAttributes( 283 const TransformedFunction& TransformedFunction, 284 LLVMContext& Ctx, AttributeList CallSiteAttrs) { 285 286 // Construct a vector of AttributeSet for each function argument. 287 std::vector<llvm::AttributeSet> ArgumentAttributes( 288 TransformedFunction.TransformedType->getNumParams()); 289 290 // Copy attributes from the parameter of the original function to the 291 // transformed version. 'ArgumentIndexMapping' holds the mapping from 292 // old argument position to new. 293 for (unsigned i=0, ie = TransformedFunction.ArgumentIndexMapping.size(); 294 i < ie; ++i) { 295 unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[i]; 296 ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(i); 297 } 298 299 // Copy annotations on varargs arguments. 
300 for (unsigned i = TransformedFunction.OriginalType->getNumParams(), 301 ie = CallSiteAttrs.getNumAttrSets(); i<ie; ++i) { 302 ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(i)); 303 } 304 305 return AttributeList::get( 306 Ctx, 307 CallSiteAttrs.getFnAttributes(), 308 CallSiteAttrs.getRetAttributes(), 309 llvm::makeArrayRef(ArgumentAttributes)); 310 } 311 312 class DataFlowSanitizer { 313 friend struct DFSanFunction; 314 friend class DFSanVisitor; 315 316 enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 }; 317 318 /// Which ABI should be used for instrumented functions? 319 enum InstrumentedABI { 320 /// Argument and return value labels are passed through additional 321 /// arguments and by modifying the return type. 322 IA_Args, 323 324 /// Argument and return value labels are passed through TLS variables 325 /// __dfsan_arg_tls and __dfsan_retval_tls. 326 IA_TLS 327 }; 328 329 /// How should calls to uninstrumented functions be handled? 330 enum WrapperKind { 331 /// This function is present in an uninstrumented form but we don't know 332 /// how it should be handled. Print a warning and call the function anyway. 333 /// Don't label the return value. 334 WK_Warning, 335 336 /// This function does not write to (user-accessible) memory, and its return 337 /// value is unlabelled. 338 WK_Discard, 339 340 /// This function does not write to (user-accessible) memory, and the label 341 /// of its return value is the union of the label of its arguments. 342 WK_Functional, 343 344 /// Instead of calling the function, a custom wrapper __dfsw_F is called, 345 /// where F is the name of the function. This function may wrap the 346 /// original function or provide its own implementation. This is similar to 347 /// the IA_Args ABI, except that IA_Args uses a struct return type to 348 /// pass the return value shadow in a register, while WK_Custom uses an 349 /// extra pointer argument to return the shadow. 
This allows the wrapped 350 /// form of the function type to be expressed in C. 351 WK_Custom 352 }; 353 354 Module *Mod; 355 LLVMContext *Ctx; 356 IntegerType *ShadowTy; 357 PointerType *ShadowPtrTy; 358 IntegerType *IntptrTy; 359 ConstantInt *ZeroShadow; 360 ConstantInt *ShadowPtrMask; 361 ConstantInt *ShadowPtrMul; 362 Constant *ArgTLS; 363 Constant *RetvalTLS; 364 FunctionType *GetArgTLSTy; 365 FunctionType *GetRetvalTLSTy; 366 Constant *GetArgTLS; 367 Constant *GetRetvalTLS; 368 Constant *ExternalShadowMask; 369 FunctionType *DFSanUnionFnTy; 370 FunctionType *DFSanUnionLoadFnTy; 371 FunctionType *DFSanUnimplementedFnTy; 372 FunctionType *DFSanSetLabelFnTy; 373 FunctionType *DFSanNonzeroLabelFnTy; 374 FunctionType *DFSanVarargWrapperFnTy; 375 FunctionType *DFSanLoadStoreCmpCallbackFnTy; 376 FunctionType *DFSanMemTransferCallbackFnTy; 377 FunctionCallee DFSanUnionFn; 378 FunctionCallee DFSanCheckedUnionFn; 379 FunctionCallee DFSanUnionLoadFn; 380 FunctionCallee DFSanUnionLoadFast16LabelsFn; 381 FunctionCallee DFSanUnimplementedFn; 382 FunctionCallee DFSanSetLabelFn; 383 FunctionCallee DFSanNonzeroLabelFn; 384 FunctionCallee DFSanVarargWrapperFn; 385 FunctionCallee DFSanLoadCallbackFn; 386 FunctionCallee DFSanStoreCallbackFn; 387 FunctionCallee DFSanMemTransferCallbackFn; 388 FunctionCallee DFSanCmpCallbackFn; 389 MDNode *ColdCallWeights; 390 DFSanABIList ABIList; 391 DenseMap<Value *, Function *> UnwrappedFnMap; 392 AttrBuilder ReadOnlyNoneAttrs; 393 bool DFSanRuntimeShadowMask = false; 394 395 Value *getShadowAddress(Value *Addr, Instruction *Pos); 396 bool isInstrumented(const Function *F); 397 bool isInstrumented(const GlobalAlias *GA); 398 FunctionType *getArgsFunctionType(FunctionType *T); 399 FunctionType *getTrampolineFunctionType(FunctionType *T); 400 TransformedFunction getCustomFunctionType(FunctionType *T); 401 InstrumentedABI getInstrumentedABI(); 402 WrapperKind getWrapperKind(Function *F); 403 void addGlobalNamePrefix(GlobalValue *GV); 404 Function 
*buildWrapperFunction(Function *F, StringRef NewFName, 405 GlobalValue::LinkageTypes NewFLink, 406 FunctionType *NewFT); 407 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName); 408 void initializeCallbackFunctions(Module &M); 409 void initializeRuntimeFunctions(Module &M); 410 411 bool init(Module &M); 412 413 public: 414 DataFlowSanitizer(const std::vector<std::string> &ABIListFiles); 415 416 bool runImpl(Module &M); 417 }; 418 419 struct DFSanFunction { 420 DataFlowSanitizer &DFS; 421 Function *F; 422 DominatorTree DT; 423 DataFlowSanitizer::InstrumentedABI IA; 424 bool IsNativeABI; 425 Value *ArgTLSPtr = nullptr; 426 Value *RetvalTLSPtr = nullptr; 427 AllocaInst *LabelReturnAlloca = nullptr; 428 DenseMap<Value *, Value *> ValShadowMap; 429 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap; 430 std::vector<std::pair<PHINode *, PHINode *>> PHIFixups; 431 DenseSet<Instruction *> SkipInsts; 432 std::vector<Value *> NonZeroChecks; 433 bool AvoidNewBlocks; 434 435 struct CachedCombinedShadow { 436 BasicBlock *Block; 437 Value *Shadow; 438 }; 439 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow> 440 CachedCombinedShadows; 441 DenseMap<Value *, std::set<Value *>> ShadowElements; 442 443 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI) 444 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) { 445 DT.recalculate(*F); 446 // FIXME: Need to track down the register allocator issue which causes poor 447 // performance in pathological cases with large numbers of basic blocks. 
448 AvoidNewBlocks = F->size() > 1000; 449 } 450 451 Value *getArgTLSPtr(); 452 Value *getArgTLS(unsigned Index, Instruction *Pos); 453 Value *getRetvalTLS(); 454 Value *getShadow(Value *V); 455 void setShadow(Instruction *I, Value *Shadow); 456 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos); 457 Value *combineOperandShadows(Instruction *Inst); 458 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align, 459 Instruction *Pos); 460 void storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow, 461 Instruction *Pos); 462 }; 463 464 class DFSanVisitor : public InstVisitor<DFSanVisitor> { 465 public: 466 DFSanFunction &DFSF; 467 468 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {} 469 470 const DataLayout &getDataLayout() const { 471 return DFSF.F->getParent()->getDataLayout(); 472 } 473 474 // Combines shadow values for all of I's operands. Returns the combined shadow 475 // value. 476 Value *visitOperandShadowInst(Instruction &I); 477 478 void visitUnaryOperator(UnaryOperator &UO); 479 void visitBinaryOperator(BinaryOperator &BO); 480 void visitCastInst(CastInst &CI); 481 void visitCmpInst(CmpInst &CI); 482 void visitGetElementPtrInst(GetElementPtrInst &GEPI); 483 void visitLoadInst(LoadInst &LI); 484 void visitStoreInst(StoreInst &SI); 485 void visitReturnInst(ReturnInst &RI); 486 void visitCallBase(CallBase &CB); 487 void visitPHINode(PHINode &PN); 488 void visitExtractElementInst(ExtractElementInst &I); 489 void visitInsertElementInst(InsertElementInst &I); 490 void visitShuffleVectorInst(ShuffleVectorInst &I); 491 void visitExtractValueInst(ExtractValueInst &I); 492 void visitInsertValueInst(InsertValueInst &I); 493 void visitAllocaInst(AllocaInst &I); 494 void visitSelectInst(SelectInst &I); 495 void visitMemSetInst(MemSetInst &I); 496 void visitMemTransferInst(MemTransferInst &I); 497 }; 498 499 } // end anonymous namespace 500 501 DataFlowSanitizer::DataFlowSanitizer( 502 const std::vector<std::string> &ABIListFiles) { 
503 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles)); 504 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(), 505 ClABIListFiles.end()); 506 // FIXME: should we propagate vfs::FileSystem to this constructor? 507 ABIList.set( 508 SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem())); 509 } 510 511 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) { 512 SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end()); 513 ArgTypes.append(T->getNumParams(), ShadowTy); 514 if (T->isVarArg()) 515 ArgTypes.push_back(ShadowPtrTy); 516 Type *RetType = T->getReturnType(); 517 if (!RetType->isVoidTy()) 518 RetType = StructType::get(RetType, ShadowTy); 519 return FunctionType::get(RetType, ArgTypes, T->isVarArg()); 520 } 521 522 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) { 523 assert(!T->isVarArg()); 524 SmallVector<Type *, 4> ArgTypes; 525 ArgTypes.push_back(T->getPointerTo()); 526 ArgTypes.append(T->param_begin(), T->param_end()); 527 ArgTypes.append(T->getNumParams(), ShadowTy); 528 Type *RetType = T->getReturnType(); 529 if (!RetType->isVoidTy()) 530 ArgTypes.push_back(ShadowPtrTy); 531 return FunctionType::get(T->getReturnType(), ArgTypes, false); 532 } 533 534 TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) { 535 SmallVector<Type *, 4> ArgTypes; 536 537 // Some parameters of the custom function being constructed are 538 // parameters of T. Record the mapping from parameters of T to 539 // parameters of the custom function, so that parameter attributes 540 // at call sites can be updated. 
541 std::vector<unsigned> ArgumentIndexMapping; 542 for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) { 543 Type* param_type = T->getParamType(i); 544 FunctionType *FT; 545 if (isa<PointerType>(param_type) && (FT = dyn_cast<FunctionType>( 546 cast<PointerType>(param_type)->getElementType()))) { 547 ArgumentIndexMapping.push_back(ArgTypes.size()); 548 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo()); 549 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx)); 550 } else { 551 ArgumentIndexMapping.push_back(ArgTypes.size()); 552 ArgTypes.push_back(param_type); 553 } 554 } 555 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) 556 ArgTypes.push_back(ShadowTy); 557 if (T->isVarArg()) 558 ArgTypes.push_back(ShadowPtrTy); 559 Type *RetType = T->getReturnType(); 560 if (!RetType->isVoidTy()) 561 ArgTypes.push_back(ShadowPtrTy); 562 return TransformedFunction( 563 T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()), 564 ArgumentIndexMapping); 565 } 566 567 bool DataFlowSanitizer::init(Module &M) { 568 Triple TargetTriple(M.getTargetTriple()); 569 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64; 570 bool IsMIPS64 = TargetTriple.isMIPS64(); 571 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 || 572 TargetTriple.getArch() == Triple::aarch64_be; 573 574 const DataLayout &DL = M.getDataLayout(); 575 576 Mod = &M; 577 Ctx = &M.getContext(); 578 ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits); 579 ShadowPtrTy = PointerType::getUnqual(ShadowTy); 580 IntptrTy = DL.getIntPtrType(*Ctx); 581 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0); 582 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes); 583 if (IsX86_64) 584 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL); 585 else if (IsMIPS64) 586 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL); 587 // AArch64 supports multiple VMAs and the shadow mask is set at runtime. 
588 else if (IsAArch64) 589 DFSanRuntimeShadowMask = true; 590 else 591 report_fatal_error("unsupported triple"); 592 593 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy }; 594 DFSanUnionFnTy = 595 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false); 596 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy }; 597 DFSanUnionLoadFnTy = 598 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false); 599 DFSanUnimplementedFnTy = FunctionType::get( 600 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false); 601 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy }; 602 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), 603 DFSanSetLabelArgs, /*isVarArg=*/false); 604 DFSanNonzeroLabelFnTy = FunctionType::get( 605 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false); 606 DFSanVarargWrapperFnTy = FunctionType::get( 607 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false); 608 DFSanLoadStoreCmpCallbackFnTy = 609 FunctionType::get(Type::getVoidTy(*Ctx), ShadowTy, /*isVarArg=*/false); 610 Type *DFSanMemTransferCallbackArgs[2] = {ShadowPtrTy, IntptrTy}; 611 DFSanMemTransferCallbackFnTy = 612 FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs, 613 /*isVarArg=*/false); 614 615 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000); 616 return true; 617 } 618 619 bool DataFlowSanitizer::isInstrumented(const Function *F) { 620 return !ABIList.isIn(*F, "uninstrumented"); 621 } 622 623 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) { 624 return !ABIList.isIn(*GA, "uninstrumented"); 625 } 626 627 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() { 628 return ClArgsABI ? 
IA_Args : IA_TLS; 629 } 630 631 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) { 632 if (ABIList.isIn(*F, "functional")) 633 return WK_Functional; 634 if (ABIList.isIn(*F, "discard")) 635 return WK_Discard; 636 if (ABIList.isIn(*F, "custom")) 637 return WK_Custom; 638 639 return WK_Warning; 640 } 641 642 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) { 643 std::string GVName = std::string(GV->getName()), Prefix = "dfs$"; 644 GV->setName(Prefix + GVName); 645 646 // Try to change the name of the function in module inline asm. We only do 647 // this for specific asm directives, currently only ".symver", to try to avoid 648 // corrupting asm which happens to contain the symbol name as a substring. 649 // Note that the substitution for .symver assumes that the versioned symbol 650 // also has an instrumented name. 651 std::string Asm = GV->getParent()->getModuleInlineAsm(); 652 std::string SearchStr = ".symver " + GVName + ","; 653 size_t Pos = Asm.find(SearchStr); 654 if (Pos != std::string::npos) { 655 Asm.replace(Pos, SearchStr.size(), 656 ".symver " + Prefix + GVName + "," + Prefix); 657 GV->getParent()->setModuleInlineAsm(Asm); 658 } 659 } 660 661 Function * 662 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName, 663 GlobalValue::LinkageTypes NewFLink, 664 FunctionType *NewFT) { 665 FunctionType *FT = F->getFunctionType(); 666 Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(), 667 NewFName, F->getParent()); 668 NewF->copyAttributesFrom(F); 669 NewF->removeAttributes( 670 AttributeList::ReturnIndex, 671 AttributeFuncs::typeIncompatible(NewFT->getReturnType())); 672 673 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF); 674 if (F->isVarArg()) { 675 NewF->removeAttributes(AttributeList::FunctionIndex, 676 AttrBuilder().addAttribute("split-stack")); 677 CallInst::Create(DFSanVarargWrapperFn, 678 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "", 679 BB); 680 new 
UnreachableInst(*Ctx, BB); 681 } else { 682 std::vector<Value *> Args; 683 unsigned n = FT->getNumParams(); 684 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n) 685 Args.push_back(&*ai); 686 CallInst *CI = CallInst::Create(F, Args, "", BB); 687 if (FT->getReturnType()->isVoidTy()) 688 ReturnInst::Create(*Ctx, BB); 689 else 690 ReturnInst::Create(*Ctx, CI, BB); 691 } 692 693 return NewF; 694 } 695 696 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT, 697 StringRef FName) { 698 FunctionType *FTT = getTrampolineFunctionType(FT); 699 FunctionCallee C = Mod->getOrInsertFunction(FName, FTT); 700 Function *F = dyn_cast<Function>(C.getCallee()); 701 if (F && F->isDeclaration()) { 702 F->setLinkage(GlobalValue::LinkOnceODRLinkage); 703 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F); 704 std::vector<Value *> Args; 705 Function::arg_iterator AI = F->arg_begin(); ++AI; 706 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N) 707 Args.push_back(&*AI); 708 CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB); 709 ReturnInst *RI; 710 if (FT->getReturnType()->isVoidTy()) 711 RI = ReturnInst::Create(*Ctx, BB); 712 else 713 RI = ReturnInst::Create(*Ctx, CI, BB); 714 715 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true); 716 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI; 717 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) 718 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI; 719 DFSanVisitor(DFSF).visitCallInst(*CI); 720 if (!FT->getReturnType()->isVoidTy()) 721 new StoreInst(DFSF.getShadow(RI->getReturnValue()), 722 &*std::prev(F->arg_end()), RI); 723 } 724 725 return cast<Constant>(C.getCallee()); 726 } 727 728 // Initialize DataFlowSanitizer runtime functions and declare them in the module 729 void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) { 730 { 731 AttributeList AL; 732 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 733 
Attribute::NoUnwind); 734 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 735 Attribute::ReadNone); 736 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex, 737 Attribute::ZExt); 738 AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt); 739 AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt); 740 DFSanUnionFn = 741 Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL); 742 } 743 { 744 AttributeList AL; 745 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 746 Attribute::NoUnwind); 747 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 748 Attribute::ReadNone); 749 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex, 750 Attribute::ZExt); 751 AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt); 752 AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt); 753 DFSanCheckedUnionFn = 754 Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL); 755 } 756 { 757 AttributeList AL; 758 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 759 Attribute::NoUnwind); 760 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 761 Attribute::ReadOnly); 762 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex, 763 Attribute::ZExt); 764 DFSanUnionLoadFn = 765 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL); 766 } 767 { 768 AttributeList AL; 769 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 770 Attribute::NoUnwind); 771 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex, 772 Attribute::ReadOnly); 773 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex, 774 Attribute::ZExt); 775 DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction( 776 "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL); 777 } 778 DFSanUnimplementedFn = 779 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy); 780 { 781 AttributeList AL; 782 AL = 
AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanSetLabelFn =
        Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);
}

// Initializes event callback functions and declare them in the module.
// These are the hooks invoked (when -dfsan-event-callbacks is on) at each
// instrumented load, store, memory transfer and comparison.
void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
  DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
                                                 DFSanLoadStoreCmpCallbackFnTy);
  DFSanStoreCallbackFn = Mod->getOrInsertFunction(
      "__dfsan_store_callback", DFSanLoadStoreCmpCallbackFnTy);
  DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
      "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
  DFSanCmpCallbackFn = Mod->getOrInsertFunction("__dfsan_cmp_callback",
                                                DFSanLoadStoreCmpCallbackFnTy);
}

// Main entry point of the pass: sets up the TLS globals and runtime
// declarations, rewrites function ABIs / builds wrappers as dictated by the
// ABI list, then instruments every eligible function body. Returns true if
// the module was changed.
bool DataFlowSanitizer::runImpl(Module &M) {
  init(M);

  // The ABI list can exempt an entire module from instrumentation.
  if (ABIList.isIn(M, "skip"))
    return false;

  // Snapshot sizes so the return value can report "changed" if anything was
  // added to the module even when no flag below was set.
  const unsigned InitialGlobalSize = M.global_size();
  const unsigned InitialModuleSize = M.size();

  bool Changed = false;

  // Thread-local slots used to pass argument/return shadows when the TLS ABI
  // is in use. Force initial-exec TLS mode on both.
  Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
  ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
  if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS)) {
    Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
    G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }
  RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
  if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS)) {
    Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
    G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }

  ExternalShadowMask =
      Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);

  initializeCallbackFunctions(M);
  initializeRuntimeFunctions(M);

  // Collect candidate functions, excluding intrinsics and the runtime
  // functions we just declared (stripPointerCasts: getOrInsertFunction may
  // have returned a bitcast constant).
  std::vector<Function *> FnsToInstrument;
  SmallPtrSet<Function *, 2> FnsWithNativeABI;
  for (Function &i : M) {
    if (!i.isIntrinsic() &&
        &i != DFSanUnionFn.getCallee()->stripPointerCasts() &&
        &i != DFSanCheckedUnionFn.getCallee()->stripPointerCasts() &&
        &i != DFSanUnionLoadFn.getCallee()->stripPointerCasts() &&
        &i != DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts() &&
        &i != DFSanUnimplementedFn.getCallee()->stripPointerCasts() &&
        &i != DFSanSetLabelFn.getCallee()->stripPointerCasts() &&
        &i != DFSanNonzeroLabelFn.getCallee()->stripPointerCasts() &&
        &i != DFSanVarargWrapperFn.getCallee()->stripPointerCasts() &&
        &i != DFSanLoadCallbackFn.getCallee()->stripPointerCasts() &&
        &i != DFSanStoreCallbackFn.getCallee()->stripPointerCasts() &&
        &i != DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts() &&
        &i != DFSanCmpCallbackFn.getCallee()->stripPointerCasts())
      FnsToInstrument.push_back(&i);
  }

  // Give function aliases prefixes when necessary, and build wrappers where the
  // instrumentedness is inconsistent.
  for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
    GlobalAlias *GA = &*i;
    ++i; // advance before the alias may be erased below
    // Don't stop on weak.  We assume people aren't playing games with the
    // instrumentedness of overridden weak aliases.
    if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
      bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
      if (GAInst && FInst) {
        addGlobalNamePrefix(GA);
      } else if (GAInst != FInst) {
        // Non-instrumented alias of an instrumented function, or vice versa.
        // Replace the alias with a native-ABI wrapper of the aliasee.  The pass
        // below will take care of instrumenting it.
        Function *NewF =
            buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
        GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
        NewF->takeName(GA);
        GA->eraseFromParent();
        FnsToInstrument.push_back(NewF);
      }
    }
  }

  ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
      .addAttribute(Attribute::ReadNone);

  // First, change the ABI of every function in the module.  ABI-listed
  // functions keep their original ABI and get a wrapper function.
  for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
                                         e = FnsToInstrument.end();
       i != e; ++i) {
    Function &F = **i;
    FunctionType *FT = F.getFunctionType();

    bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
                              FT->getReturnType()->isVoidTy());

    if (isInstrumented(&F)) {
      // Instrumented functions get a 'dfs$' prefix.  This allows us to more
      // easily identify cases of mismatching ABIs.
      if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
        // IA_Args: replace F with a clone whose signature carries explicit
        // shadow parameters/return, then transplant the body.
        FunctionType *NewFT = getArgsFunctionType(FT);
        Function *NewF = Function::Create(NewFT, F.getLinkage(),
                                          F.getAddressSpace(), "", &M);
        NewF->copyAttributesFrom(&F);
        NewF->removeAttributes(
            AttributeList::ReturnIndex,
            AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
        for (Function::arg_iterator FArg = F.arg_begin(),
                                    NewFArg = NewF->arg_begin(),
                                    FArgEnd = F.arg_end();
             FArg != FArgEnd; ++FArg, ++NewFArg) {
          FArg->replaceAllUsesWith(&*NewFArg);
        }
        NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());

        // Re-point any blockaddress constants at the moved basic blocks.
        for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
             UI != UE;) {
          BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
          ++UI; // advance first: the RAUW below invalidates this use
          if (BA) {
            BA->replaceAllUsesWith(
                BlockAddress::get(NewF, BA->getBasicBlock()));
            delete BA;
          }
        }
        F.replaceAllUsesWith(
            ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
        NewF->takeName(&F);
        F.eraseFromParent();
        *i = NewF;
        addGlobalNamePrefix(NewF);
      } else {
        addGlobalNamePrefix(&F);
      }
    } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
      // Build a wrapper function for F.  The wrapper simply calls F, and is
      // added to FnsToInstrument so that any instrumentation according to its
      // WrapperKind is done in the second pass below.
      FunctionType *NewFT = getInstrumentedABI() == IA_Args
                                ? getArgsFunctionType(FT)
                                : FT;

      // If the function being wrapped has local linkage, then preserve the
      // function's linkage in the wrapper function.
      GlobalValue::LinkageTypes wrapperLinkage =
          F.hasLocalLinkage()
              ? F.getLinkage()
              : GlobalValue::LinkOnceODRLinkage;

      Function *NewF = buildWrapperFunction(
          &F, std::string("dfsw$") + std::string(F.getName()),
          wrapperLinkage, NewFT);
      // With the TLS ABI the wrapper writes shadow to TLS, so it cannot keep
      // readonly/readnone attributes copied from F.
      if (getInstrumentedABI() == IA_TLS)
        NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);

      Value *WrappedFnCst =
          ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
      F.replaceAllUsesWith(WrappedFnCst);

      UnwrappedFnMap[WrappedFnCst] = &F;
      *i = NewF;

      if (!F.isDeclaration()) {
        // This function is probably defining an interposition of an
        // uninstrumented function and hence needs to keep the original ABI.
        // But any functions it may call need to use the instrumented ABI, so
        // we instrument it in a mode which preserves the original ABI.
        FnsWithNativeABI.insert(&F);

        // This code needs to rebuild the iterators, as they may be invalidated
        // by the push_back, taking care that the new range does not include
        // any functions added by this code.
        size_t N = i - FnsToInstrument.begin(),
               Count = e - FnsToInstrument.begin();
        FnsToInstrument.push_back(&F);
        i = FnsToInstrument.begin() + N;
        e = FnsToInstrument.begin() + Count;
      }
      // Hopefully, nobody will try to indirectly call a vararg
      // function... yet.
    } else if (FT->isVarArg()) {
      UnwrappedFnMap[&F] = &F;
      *i = nullptr; // marker: skip in the instrumentation loop below
    }
  }

  // Second pass: instrument the body of every function that survived the ABI
  // rewrite above.
  for (Function *i : FnsToInstrument) {
    if (!i || i->isDeclaration())
      continue;

    removeUnreachableBlocks(*i);

    DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));

    // DFSanVisitor may create new basic blocks, which confuses df_iterator.
    // Build a copy of the list before iterating over it.
    SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));

    for (BasicBlock *i : BBList) {
      Instruction *Inst = &i->front();
      while (true) {
        // DFSanVisitor may split the current basic block, changing the current
        // instruction's next pointer and moving the next instruction to the
        // tail block from which we should continue.
        Instruction *Next = Inst->getNextNode();
        // DFSanVisitor may delete Inst, so keep track of whether it was a
        // terminator.
        bool IsTerminator = Inst->isTerminator();
        if (!DFSF.SkipInsts.count(Inst))
          DFSanVisitor(DFSF).visit(Inst);
        if (IsTerminator)
          break;
        Inst = Next;
      }
    }

    // We will not necessarily be able to compute the shadow for every phi node
    // until we have visited every block.  Therefore, the code that handles phi
    // nodes adds them to the PHIFixups list so that they can be properly
    // handled here.
    for (std::vector<std::pair<PHINode *, PHINode *>>::iterator
             i = DFSF.PHIFixups.begin(),
             e = DFSF.PHIFixups.end();
         i != e; ++i) {
      for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
           ++val) {
        i->second->setIncomingValue(
            val, DFSF.getShadow(i->first->getIncomingValue(val)));
      }
    }

    // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
    // places (i.e. instructions in basic blocks we haven't even begun visiting
    // yet).  To make our life easier, do this work in a pass after the main
    // instrumentation.
    if (ClDebugNonzeroLabels) {
      for (Value *V : DFSF.NonZeroChecks) {
        Instruction *Pos;
        if (Instruction *I = dyn_cast<Instruction>(V))
          Pos = I->getNextNode();
        else
          Pos = &DFSF.F->getEntryBlock().front();
        // Skip past phis/allocas: the check must be inserted at a point where
        // instructions are legal (after all phi nodes).
        while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
          Pos = Pos->getNextNode();
        IRBuilder<> IRB(Pos);
        Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
        BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
            Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
        IRBuilder<> ThenIRB(BI);
        ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
      }
    }
  }

  return Changed || !FnsToInstrument.empty() ||
         M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
}

// Returns (caching the result) the base pointer of the argument-shadow TLS
// array, emitting a call to the runtime accessor in the entry block when the
// global is not directly visible.
Value *DFSanFunction::getArgTLSPtr() {
  if (ArgTLSPtr)
    return ArgTLSPtr;
  if (DFS.ArgTLS)
    return ArgTLSPtr = DFS.ArgTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLSTy, DFS.GetArgTLS, {});
}

// Returns (caching the result) the pointer to the return-value shadow TLS
// slot, mirroring getArgTLSPtr above.
Value *DFSanFunction::getRetvalTLS() {
  if (RetvalTLSPtr)
    return RetvalTLSPtr;
  if (DFS.RetvalTLS)
    return RetvalTLSPtr = DFS.RetvalTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return RetvalTLSPtr =
             IRB.CreateCall(DFS.GetRetvalTLSTy, DFS.GetRetvalTLS, {});
}
// Returns a pointer to the Idx-th slot of the 64-entry argument-shadow TLS
// array, computed with IR inserted before Pos.
Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
  IRBuilder<> IRB(Pos);
  return IRB.CreateConstGEP2_64(ArrayType::get(DFS.ShadowTy, 64),
                                getArgTLSPtr(), 0, Idx);
}

// Returns the shadow value for V, computing and caching it on first request.
// Constants (neither Argument nor Instruction) always have the zero shadow;
// argument shadows come from TLS or from the appended shadow parameters,
// depending on the instrumented ABI.
Value *DFSanFunction::getShadow(Value *V) {
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.ZeroShadow;
  Value *&Shadow = ValShadowMap[V];
  if (!Shadow) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      // Functions kept on the native ABI receive no shadow for arguments.
      if (IsNativeABI)
        return DFS.ZeroShadow;
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        // Load the argument shadow from its TLS slot.  If the TLS global is
        // known, load at the top of the entry block; otherwise load right
        // after the call that materialized the TLS pointer.
        Value *ArgTLSPtr = getArgTLSPtr();
        Instruction *ArgTLSPos =
            DFS.ArgTLS ? &*F->getEntryBlock().begin()
                       : cast<Instruction>(ArgTLSPtr)->getNextNode();
        IRBuilder<> IRB(ArgTLSPos);
        Shadow =
            IRB.CreateLoad(DFS.ShadowTy, getArgTLS(A->getArgNo(), ArgTLSPos));
        break;
      }
      case DataFlowSanitizer::IA_Args: {
        // With the args ABI the second half of the parameter list holds the
        // shadows: shadow of arg N is parameter N + (arg count / 2).
        unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
        Function::arg_iterator i = F->arg_begin();
        while (ArgIdx--)
          ++i;
        Shadow = &*i;
        assert(Shadow->getType() == DFS.ShadowTy);
        break;
      }
      }
      NonZeroChecks.push_back(Shadow);
    } else {
      Shadow = DFS.ZeroShadow;
    }
  }
  return Shadow;
}

// Records Shadow as the shadow value of instruction I.  Each instruction may
// be assigned a shadow exactly once.
void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
  assert(!ValShadowMap.count(I));
  assert(Shadow->getType() == DFS.ShadowTy);
  ValShadowMap[I] = Shadow;
}

// Returns the address of the shadow memory backing application address Addr,
// with IR inserted before Pos: mask off the high address bits (see the memory
// layout in the file header) and scale by ShadowPtrMul to account for the
// wider-than-byte shadow encoding.
Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
  assert(Addr != RetvalTLS && "Reinstrumenting?");
  IRBuilder<> IRB(Pos);
  Value *ShadowPtrMaskValue;
  // The mask is either loaded from the runtime-provided external global or is
  // the compile-time constant for this platform.
  if (DFSanRuntimeShadowMask)
    ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
  else
    ShadowPtrMaskValue = ShadowPtrMask;
  return IRB.CreateIntToPtr(
      IRB.CreateMul(
          IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
          ShadowPtrMul),
      ShadowPtrTy);
}

// Generates IR to compute the union of the two given shadows, inserting it
// before Pos.  Returns the computed union Value.  Uses the ShadowElements
// sets to skip unions that are already subsumed, and caches the result per
// (V1, V2) pair keyed on a dominating block.
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
  // Trivial cases: union with zero, or with itself.
  if (V1 == DFS.ZeroShadow)
    return V2;
  if (V2 == DFS.ZeroShadow)
    return V1;
  if (V1 == V2)
    return V1;

  // If one operand's known element set already contains the other's, the
  // union is redundant.
  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return V1;
    } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                             V1Elems->second.begin(), V1Elems->second.end())) {
      return V2;
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return V1;
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return V2;
  }

  // Canonicalize the pair ordering so (V1,V2) and (V2,V1) share a cache slot.
  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
  // Reuse a previously computed union if its block dominates Pos.
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  IRBuilder<> IRB(Pos);
  if (ClFast16Labels) {
    // Fast16 labels are bit sets, so the union is a plain OR.
    CCS.Block = Pos->getParent();
    CCS.Shadow = IRB.CreateOr(V1, V2);
  } else if (AvoidNewBlocks) {
    // Straight-line variant: always call the checked union runtime function.
    CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    CCS.Block = Pos->getParent();
    CCS.Shadow = Call;
  } else {
    // Branching variant: only call __dfsan_union when the labels differ, and
    // merge the result with a phi in the tail block.
    BasicBlock *Head = Pos->getParent();
    Value *Ne = IRB.CreateICmpNE(V1, V2);
    BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
        Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
    IRBuilder<> ThenIRB(BI);
    CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    BasicBlock *Tail = BI->getSuccessor(0);
    PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Phi->addIncoming(Call, Call->getParent());
    Phi->addIncoming(V1, Head); // labels were equal: either one will do
    CCS.Block = Tail;
    CCS.Shadow = Phi;
  }

  // Record the element set of the new union for the subsumption checks above.
  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}

// A convenience function which folds the shadows of each of the operands
// of the provided instruction Inst, inserting the IR before Inst.  Returns
// the computed union Value.
Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
  if (Inst->getNumOperands() == 0)
    return DFS.ZeroShadow;

  Value *Shadow = getShadow(Inst->getOperand(0));
  for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
    Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
  }
  return Shadow;
}

// Assigns I the union of its operands' shadows and returns that union.
Value *DFSanVisitor::visitOperandShadowInst(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
  return CombinedShadow;
}

// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
// Addr has alignment Align, and take the union of each of those shadows.
Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
                                 Instruction *Pos) {
  // Allocas whose shadow we track in a local shadow alloca: just load it.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto i = AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      return IRB.CreateLoad(DFS.ShadowTy, i->second);
    }
  }

  const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
  SmallVector<const Value *, 2> Objs;
  getUnderlyingObjects(Addr, Objs);
  // If every underlying object is constant (functions, block addresses,
  // constant globals), the loaded data cannot carry a label.
  bool AllConstants = true;
  for (const Value *Obj : Objs) {
    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
      continue;
    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
      continue;

    AllConstants = false;
    break;
  }
  if (AllConstants)
    return DFS.ZeroShadow;

  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  // Small sizes get dedicated inline sequences.
  switch (Size) {
  case 0:
    return DFS.ZeroShadow;
  case 1: {
    LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
    LI->setAlignment(ShadowAlign);
    return LI;
  }
  case 2: {
    IRBuilder<> IRB(Pos);
    Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
                                       ConstantInt::get(DFS.IntptrTy, 1));
    return combineShadows(
        IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr, ShadowAlign),
        IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
  }
  }

  if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0) {
    // First OR all the WideShadows, then OR individual shadows within the
    // combined WideShadow.  This is fewer instructions than ORing shadows
    // individually.
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *CombinedWideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidthBits) {
      WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                               ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow =
          IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
      CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
    }
    // Fold the 64-bit word onto itself by halves until one shadow-width lane
    // holds the OR of all lanes, then truncate.
    for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
      Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
      CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
    }
    return IRB.CreateTrunc(CombinedWideShadow, DFS.ShadowTy);
  }
  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) {
    // Fast path for the common case where each byte has identical shadow: load
    // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
    // shadow is non-equal.
    BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
    IRBuilder<> FallbackIRB(FallbackBB);
    CallInst *FallbackCall = FallbackIRB.CreateCall(
        DFS.DFSanUnionLoadFn,
        {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
    FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);

    // Compare each of the shadows stored in the loaded 64 bits to each other,
    // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *WideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
    Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
    Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
    Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
    Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);

    BasicBlock *Head = Pos->getParent();
    BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());

    // Keep the dominator tree consistent with the split: Tail now dominates
    // everything Head used to dominate.
    if (DomTreeNode *OldNode = DT.getNode(Head)) {
      std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());

      DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
      for (auto Child : Children)
        DT.changeImmediateDominator(Child, NewNode);
    }

    // In the following code LastBr will refer to the previous basic block's
    // conditional branch instruction, whose true successor is fixed up to point
    // to the next block during the loop below or to the tail after the final
    // iteration.
    BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
    ReplaceInstWithInst(Head->getTerminator(), LastBr);
    DT.addNewBlock(FallbackBB, Head);

    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidthBits) {
      BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
      DT.addNewBlock(NextBB, LastBr->getParent());
      IRBuilder<> NextIRB(NextBB);
      WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                                   ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow = NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(),
                                                        WideAddr, ShadowAlign);
      ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
      LastBr->setSuccessor(0, NextBB);
      LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
    }

    LastBr->setSuccessor(0, Tail);
    FallbackIRB.CreateBr(Tail);
    PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Shadow->addIncoming(FallbackCall, FallbackBB);
    Shadow->addIncoming(TruncShadow, LastBr->getParent());
    return Shadow;
  }

  // General fallback: let the runtime compute the union of the shadow range.
  IRBuilder<> IRB(Pos);
  FunctionCallee &UnionLoadFn =
      ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
  CallInst *FallbackCall = IRB.CreateCall(
      UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  return FallbackCall;
}

// Computes the shadow of a load as the union of the loaded bytes' shadows
// (optionally combined with the pointer's shadow) and registers it for the
// nonzero-label debug check.
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
  auto &DL = LI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(LI.getType());
  if (Size == 0) {
    DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
    return;
  }

  Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
  Value *Shadow =
      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
  if (ClCombinePointerLabelsOnLoad) {
    Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
  }
  if (Shadow != DFSF.DFS.ZeroShadow)
    DFSF.NonZeroChecks.push_back(Shadow);

  DFSF.setShadow(&LI, Shadow);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&LI);
    IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, Shadow);
  }
}

// Generates IR to store Shadow into the shadow memory backing bytes
// [Addr, Addr+Size), inserted before Pos.  Writes the zero shadow as one wide
// store, and replicates nonzero shadows vector-at-a-time where possible.
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
                                Value *Shadow, Instruction *Pos) {
  // Allocas tracked in a local shadow alloca: just store there.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto i = AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      IRB.CreateStore(Shadow, i->second);
      return;
    }
  }

  const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
  IRBuilder<> IRB(Pos);
  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  if (Shadow == DFS.ZeroShadow) {
    // Clear the whole range with a single integer store of zeros.
    IntegerType *ShadowTy =
        IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
    Value *ExtShadowAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
    return;
  }

  // Broadcast the shadow into a 128-bit vector and store vector-sized chunks,
  // then finish the remainder element by element.
  const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
  uint64_t Offset = 0;
  if (Size >= ShadowVecSize) {
    auto *ShadowVecTy = FixedVectorType::get(DFS.ShadowTy, ShadowVecSize);
    Value *ShadowVec = UndefValue::get(ShadowVecTy);
    for (unsigned i = 0; i != ShadowVecSize; ++i) {
      ShadowVec = IRB.CreateInsertElement(
          ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
    }
    Value *ShadowVecAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
    do {
      Value *CurShadowVecAddr =
          IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
      IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
      Size -= ShadowVecSize;
      ++Offset;
    } while (Size >= ShadowVecSize);
    Offset *= ShadowVecSize; // convert from vector index to element index
  }
  while (Size > 0) {
    Value *CurShadowAddr =
        IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
    IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
    --Size;
    ++Offset;
  }
}

// Propagates the stored value's shadow (optionally unioned with the pointer's
// shadow) into the shadow memory for the stored bytes.
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
  auto &DL = SI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
  if (Size == 0)
    return;

  const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);

  Value* Shadow = DFSF.getShadow(SI.getValueOperand());
  if (ClCombinePointerLabelsOnStore) {
    Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
  }
  DFSF.storeShadow(SI.getPointerOperand(), Size, Alignment, Shadow, &SI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&SI);
    IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, Shadow);
  }
}

// The visitors below all propagate shadow as the union of operand shadows.

void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
  visitOperandShadowInst(UO);
}

void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
  visitOperandShadowInst(BO);
}

void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }

void DFSanVisitor::visitCmpInst(CmpInst &CI) {
  Value *CombinedShadow = visitOperandShadowInst(CI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&CI);
    IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
  }
}

void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  visitOperandShadowInst(GEPI);
}

void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
  visitOperandShadowInst(I);
}

// For allocas used only by direct loads/stores, track the shadow in a
// companion shadow alloca instead of shadow memory; the alloca's own shadow
// (its address) is always zero.
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
  bool AllLoadsStores = true;
  for (User *U : I.users()) {
    if (isa<LoadInst>(U))
      continue;

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getPointerOperand() == &I)
        continue;
    }

    AllLoadsStores = false;
    break;
  }
  if (AllLoadsStores) {
    IRBuilder<> IRB(&I);
    DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
  }
  DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
}

// The shadow of a select mirrors the select itself (choose the selected
// operand's shadow), optionally unioned with the condition's shadow when
// -dfsan-track-select-control-flow is on.  Vector conditions conservatively
// union both sides.
void DFSanVisitor::visitSelectInst(SelectInst &I) {
  Value *CondShadow = DFSF.getShadow(I.getCondition());
  Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
  Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
  Value *ShadowSel = nullptr;

  if (isa<VectorType>(I.getCondition()->getType())) {
    ShadowSel = DFSF.combineShadows(TrueShadow, FalseShadow, &I);
  } else {
    if (TrueShadow == FalseShadow) {
      ShadowSel = TrueShadow;
    } else {
      ShadowSel =
          SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
    }
  }
  DFSF.setShadow(&I, ClTrackSelectControlFlow
                         ? DFSF.combineShadows(CondShadow, ShadowSel, &I)
                         : ShadowSel);
}

// memset: ask the runtime to set the destination bytes' labels to the shadow
// of the value being stored.
void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
  IRBuilder<> IRB(&I);
  Value *ValShadow = DFSF.getShadow(I.getValue());
  IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
                 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
                                                                *DFSF.DFS.Ctx)),
                  IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}

// memcpy/memmove: emit a parallel transfer of the corresponding shadow bytes
// (length and alignment scaled by the shadow width).
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);
  Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  Value *LenShadow =
      IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
                                                    DFSF.DFS.ShadowWidthBytes));
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  // Clone the original intrinsic call so memcpy stays memcpy and memmove
  // stays memmove, just operating on shadow memory.
  auto *MTI = cast<MemTransferInst>(
      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
  if (ClPreserveAlignment) {
    MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
    MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
  } else {
    MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
    MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
  }
  if (ClEventCallbacks) {
    IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
                   {RawDestShadow, I.getLength()});
  }
}

// Passes the return value's shadow back to the caller: via the retval TLS
// slot (IA_TLS) or by packing {value, shadow} into the aggregate return
// (IA_Args).  Native-ABI functions return no shadow.
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      IRB.CreateStore(S, DFSF.getRetvalTLS());
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      IRBuilder<>
IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      // IA_Args-style return: pack {original value, shadow} into the
      // aggregate return type (value at index 0, shadow label at index 1)
      // and return the aggregate instead of the bare value.
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}

/// Instrument a call or invoke site.
///
/// - Intrinsics and inline asm just propagate the union of their operands'
///   shadows (visitOperandShadowInst).
/// - Functions found in UnwrappedFnMap are dispatched on their wrapper kind
///   (warn / discard / functional / custom).
/// - All remaining calls are instrumented per the active ABI: shadows are
///   passed through TLS slots (IA_TLS) or appended to the argument list of a
///   transformed callee (IA_Args).
void DFSanVisitor::visitCallBase(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
    visitOperandShadowInst(CB);
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
    return;

  IRBuilder<> IRB(&CB);

  DenseMap<Value *, Function *>::iterator i =
      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
  if (i != DFSF.DFS.UnwrappedFnMap.end()) {
    // NOTE: this inner 'F' (the unwrapped callee) shadows the outer 'F'.
    Function *F = i->second;
    switch (DFSF.DFS.getWrapperKind(F)) {
    case DataFlowSanitizer::WK_Warning:
      // Call the real function, but emit a runtime "unimplemented" warning
      // naming it, and treat the result as unlabelled.
      CB.setCalledFunction(F);
      IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                     IRB.CreateGlobalStringPtr(F->getName()));
      DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Discard:
      // Call the real function; result carries no label.
      CB.setCalledFunction(F);
      DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Functional:
      // Call the real function; result label is the union of the argument
      // labels.
      CB.setCalledFunction(F);
      visitOperandShadowInst(CB);
      return;
    case DataFlowSanitizer::WK_Custom:
      // Don't try to handle invokes of custom functions, it's too complicated.
      // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
      // wrapper.
      if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
        FunctionType *FT = F->getFunctionType();
        TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
        std::string CustomFName = "__dfsw_";
        CustomFName += F->getName();
        FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
            CustomFName, CustomFn.TransformedType);
        // NOTE(review): this Function* 'CustomFn' shadows the
        // TransformedFunction 'CustomFn' declared above; the
        // TransformFunctionAttributes call further down refers to the OUTER
        // one. Confusing but intentional-looking — verify before renaming.
        if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
          CustomFn->copyAttributesFrom(F);

          // Custom functions returning non-void will write to the return label.
          if (!FT->getReturnType()->isVoidTy()) {
            CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                       DFSF.DFS.ReadOnlyNoneAttrs);
          }
        }

        std::vector<Value *> Args;

        // First pass over the fixed parameters: function-pointer arguments
        // are replaced by a (trampoline, i8* context) pair so the custom
        // wrapper can forward labels through the indirect call.
        auto i = CB.arg_begin();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
          Type *T = (*i)->getType();
          FunctionType *ParamFT;
          if (isa<PointerType>(T) &&
              (ParamFT = dyn_cast<FunctionType>(
                   cast<PointerType>(T)->getElementType()))) {
            // Trampoline name encodes the parameter index and the callee:
            // dfst<index>$<function>.
            std::string TName = "dfst";
            TName += utostr(FT->getNumParams() - n);
            TName += "$";
            TName += F->getName();
            // NOTE: this Constant* 'T' shadows the Type* 'T' above.
            Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
            Args.push_back(T);
            Args.push_back(
                IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
          } else {
            Args.push_back(*i);
          }
        }

        // Second pass: append one shadow argument per fixed parameter.
        // ShadowArgStart remembers where they begin so ZExt attributes can be
        // attached below.
        i = CB.arg_begin();
        const unsigned ShadowArgStart = Args.size();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
          Args.push_back(DFSF.getShadow(*i));

        if (FT->isVarArg()) {
          // Variadic arguments: store their labels into a stack array in the
          // entry block and pass a pointer to its first element.
          auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
                                           CB.arg_size() - FT->getNumParams());
          auto *LabelVAAlloca = new AllocaInst(
              LabelVATy, getDataLayout().getAllocaAddrSpace(),
              "labelva", &DFSF.F->getEntryBlock().front());

          // 'i' continues from the end of the fixed parameters.
          for (unsigned n = 0; i != CB.arg_end(); ++i, ++n) {
            auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
            IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
          }

          Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
        }

        if (!FT->getReturnType()->isVoidTy()) {
          // Lazily create a single per-function alloca that the custom
          // wrapper writes the return label into.
          if (!DFSF.LabelReturnAlloca) {
            DFSF.LabelReturnAlloca =
                new AllocaInst(DFSF.DFS.ShadowTy,
                               getDataLayout().getAllocaAddrSpace(),
                               "labelreturn", &DFSF.F->getEntryBlock().front());
          }
          Args.push_back(DFSF.LabelReturnAlloca);
        }

        // Finally, forward the variadic arguments themselves.
        for (i = CB.arg_begin() + FT->getNumParams(); i != CB.arg_end(); ++i)
          Args.push_back(*i);

        CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
        CustomCI->setCallingConv(CI->getCallingConv());
        // 'CustomFn' here is the outer TransformedFunction (see note above).
        CustomCI->setAttributes(TransformFunctionAttributes(CustomFn,
            CI->getContext(), CI->getAttributes()));

        // Update the parameter attributes of the custom call instruction to
        // zero extend the shadow parameters. This is required for targets
        // which consider ShadowTy an illegal type.
        for (unsigned n = 0; n < FT->getNumParams(); n++) {
          const unsigned ArgNo = ShadowArgStart + n;
          if (CustomCI->getArgOperand(ArgNo)->getType() == DFSF.DFS.ShadowTy)
            CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
        }

        if (!FT->getReturnType()->isVoidTy()) {
          // Read back the return label the wrapper stored.
          LoadInst *LabelLoad =
              IRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.LabelReturnAlloca);
          DFSF.setShadow(CustomCI, LabelLoad);
        }

        // Replace the original call entirely with the custom-wrapper call.
        CI->replaceAllUsesWith(CustomCI);
        CI->eraseFromParent();
        return;
      }
      // Invokes of custom functions fall through to the generic handling.
      break;
    }
  }

  FunctionType *FT = CB.getFunctionType();
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    // IA_TLS: spill each argument's shadow into the per-thread argument TLS
    // slots before the call.
    for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
      IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
                      DFSF.getArgTLS(i, &CB));
    }
  }

  // Find the instruction position where the callee's return-shadow becomes
  // readable: right after a call, or at the head of an invoke's normal
  // destination (splitting the edge if that block has other predecessors).
  Instruction *Next = nullptr;
  if (!CB.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CB.getIterator() != CB.getParent()->end());
      Next = CB.getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      // Load the return shadow from the retval TLS slot at 'Next'; mark the
      // load itself as not-to-be-instrumented and queue it for the
      // nonzero-label check.
      IRBuilder<> NextIRB(Next);
      LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.getRetvalTLS());
      DFSF.SkipInsts.insert(LI);
      DFSF.setShadow(&CB, LI);
      DFSF.NonZeroChecks.push_back(LI);
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    // Rewrite the call to target the args-ABI function type: original
    // arguments first, then one shadow per fixed argument, then (for
    // varargs) a pointer to a shadow array followed by the varargs.
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
    std::vector<Value *> Args;

    auto i = CB.arg_begin(), E = CB.arg_end();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(*i);

    i = CB.arg_begin();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(DFSF.getShadow(*i));

    if (FT->isVarArg()) {
      unsigned VarArgSize = CB.arg_size() - FT->getNumParams();
      ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
      for (unsigned n = 0; i != E; ++i, ++n) {
        IRB.CreateStore(
            DFSF.getShadow(*i),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
        Args.push_back(*i);
      }
    }

    CallBase *NewCB;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    } else {
      NewCB = IRB.CreateCall(NewFT, Func, Args);
    }
    NewCB->setCallingConv(CB.getCallingConv());
    // Drop return attributes that are incompatible with the new aggregate
    // {value, shadow} return type.
    NewCB->setAttributes(CB.getAttributes().removeAttributes(
        *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
        AttributeFuncs::typeIncompatible(NewCB->getType())));

    if (Next) {
      // Unpack the aggregate: element 0 is the original value, element 1 its
      // shadow. Both extracts are excluded from further instrumentation.
      ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
      DFSF.SkipInsts.insert(ExVal);
      ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
      DFSF.SkipInsts.insert(ExShadow);
      DFSF.setShadow(ExVal, ExShadow);
      DFSF.NonZeroChecks.push_back(ExShadow);

      CB.replaceAllUsesWith(ExVal);
    }

    CB.eraseFromParent();
  }
}

/// Create a shadow phi mirroring the original phi. Real incoming shadows are
/// not known yet, so undef placeholders are installed per predecessor and the
/// pair is queued in PHIFixups for patching after all shadows exist.
void DFSanVisitor::visitPHINode(PHINode &PN) {
  PHINode *ShadowPN =
      PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);

  // Give the shadow phi node valid predecessors to fool SplitEdge into working.
  Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
  // Placeholder undef incoming values, one per predecessor; replaced later
  // via the PHIFixups entry recorded below.
  for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
       ++i) {
    ShadowPN->addIncoming(UndefShadow, *i);
  }

  DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
  DFSF.setShadow(&PN, ShadowPN);
}

namespace {
/// Legacy (old pass manager) wrapper around the DataFlowSanitizer
/// instrumentation; simply forwards runOnModule to the shared runImpl.
class DataFlowSanitizerLegacyPass : public ModulePass {
private:
  // ABI list files controlling which functions get wrapped/instrumented;
  // forwarded verbatim to DataFlowSanitizer's constructor.
  std::vector<std::string> ABIListFiles;

public:
  static char ID;

  DataFlowSanitizerLegacyPass(
      const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
      : ModulePass(ID), ABIListFiles(ABIListFiles) {}

  bool runOnModule(Module &M) override {
    // Returns true iff the module was modified.
    return DataFlowSanitizer(ABIListFiles).runImpl(M);
  }
};
} // namespace

char DataFlowSanitizerLegacyPass::ID;

INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)

/// Factory used by clients of the legacy pass manager.
ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
    const std::vector<std::string> &ABIListFiles) {
  return new DataFlowSanitizerLegacyPass(ABIListFiles);
}

/// New pass manager entry point: run the instrumentation and report whether
/// any analyses may have been invalidated.
PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
                                             ModuleAnalysisManager &AM) {
  if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
    // The module was instrumented; conservatively invalidate everything.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}