1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow 11 /// analysis. 12 /// 13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific 14 /// class of bugs on its own. Instead, it provides a generic dynamic data flow 15 /// analysis framework to be used by clients to help detect application-specific 16 /// issues within their own code. 17 /// 18 /// The analysis is based on automatic propagation of data flow labels (also 19 /// known as taint labels) through a program as it performs computation. Each 20 /// byte of application memory is backed by two bytes of shadow memory which 21 /// hold the label. On Linux/x86_64, memory is laid out as follows: 22 /// 23 /// +--------------------+ 0x800000000000 (top of memory) 24 /// | application memory | 25 /// +--------------------+ 0x700000008000 (kAppAddr) 26 /// | | 27 /// | unused | 28 /// | | 29 /// +--------------------+ 0x200200000000 (kUnusedAddr) 30 /// | union table | 31 /// +--------------------+ 0x200000000000 (kUnionTableAddr) 32 /// | shadow memory | 33 /// +--------------------+ 0x000000010000 (kShadowAddr) 34 /// | reserved by kernel | 35 /// +--------------------+ 0x000000000000 36 /// 37 /// To derive a shadow memory address from an application memory address, 38 /// bits 44-46 are cleared to bring the address into the range 39 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to 40 /// account for the double byte representation of shadow labels and move the 41 /// address into the shadow memory range. 
See the function 42 /// DataFlowSanitizer::getShadowAddress below. 43 /// 44 /// For more information, please refer to the design document: 45 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html 46 // 47 //===----------------------------------------------------------------------===// 48 49 #include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h" 50 #include "llvm/ADT/DenseMap.h" 51 #include "llvm/ADT/DenseSet.h" 52 #include "llvm/ADT/DepthFirstIterator.h" 53 #include "llvm/ADT/None.h" 54 #include "llvm/ADT/SmallPtrSet.h" 55 #include "llvm/ADT/SmallVector.h" 56 #include "llvm/ADT/StringExtras.h" 57 #include "llvm/ADT/StringRef.h" 58 #include "llvm/ADT/Triple.h" 59 #include "llvm/Analysis/ValueTracking.h" 60 #include "llvm/IR/Argument.h" 61 #include "llvm/IR/Attributes.h" 62 #include "llvm/IR/BasicBlock.h" 63 #include "llvm/IR/Constant.h" 64 #include "llvm/IR/Constants.h" 65 #include "llvm/IR/DataLayout.h" 66 #include "llvm/IR/DerivedTypes.h" 67 #include "llvm/IR/Dominators.h" 68 #include "llvm/IR/Function.h" 69 #include "llvm/IR/GlobalAlias.h" 70 #include "llvm/IR/GlobalValue.h" 71 #include "llvm/IR/GlobalVariable.h" 72 #include "llvm/IR/IRBuilder.h" 73 #include "llvm/IR/InlineAsm.h" 74 #include "llvm/IR/InstVisitor.h" 75 #include "llvm/IR/InstrTypes.h" 76 #include "llvm/IR/Instruction.h" 77 #include "llvm/IR/Instructions.h" 78 #include "llvm/IR/IntrinsicInst.h" 79 #include "llvm/IR/LLVMContext.h" 80 #include "llvm/IR/MDBuilder.h" 81 #include "llvm/IR/Module.h" 82 #include "llvm/IR/PassManager.h" 83 #include "llvm/IR/Type.h" 84 #include "llvm/IR/User.h" 85 #include "llvm/IR/Value.h" 86 #include "llvm/InitializePasses.h" 87 #include "llvm/Pass.h" 88 #include "llvm/Support/Casting.h" 89 #include "llvm/Support/CommandLine.h" 90 #include "llvm/Support/ErrorHandling.h" 91 #include "llvm/Support/SpecialCaseList.h" 92 #include "llvm/Support/VirtualFileSystem.h" 93 #include "llvm/Transforms/Instrumentation.h" 94 #include 
"llvm/Transforms/Utils/BasicBlockUtils.h" 95 #include "llvm/Transforms/Utils/Local.h" 96 #include <algorithm> 97 #include <cassert> 98 #include <cstddef> 99 #include <cstdint> 100 #include <iterator> 101 #include <memory> 102 #include <set> 103 #include <string> 104 #include <utility> 105 #include <vector> 106 107 using namespace llvm; 108 109 // External symbol to be used when generating the shadow address for 110 // architectures with multiple VMAs. Instead of using a constant integer 111 // the runtime will set the external mask based on the VMA range. 112 static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask"; 113 114 // The -dfsan-preserve-alignment flag controls whether this pass assumes that 115 // alignment requirements provided by the input IR are correct. For example, 116 // if the input IR contains a load with alignment 8, this flag will cause 117 // the shadow load to have alignment 16. This flag is disabled by default as 118 // we have unfortunately encountered too much code (including Clang itself; 119 // see PR14291) which performs misaligned access. 120 static cl::opt<bool> ClPreserveAlignment( 121 "dfsan-preserve-alignment", 122 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden, 123 cl::init(false)); 124 125 // The ABI list files control how shadow parameters are passed. The pass treats 126 // every function labelled "uninstrumented" in the ABI list file as conforming 127 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains 128 // additional annotations for those functions, a call to one of those functions 129 // will produce a warning message, as the labelling behaviour of the function is 130 // unknown. The other supported annotations are "functional" and "discard", 131 // which are described below under DataFlowSanitizer::WrapperKind. 
static cl::list<std::string> ClABIListFiles(
    "dfsan-abilist",
    cl::desc("File listing native ABI functions and how the pass treats them"),
    cl::Hidden);

// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool> ClArgsABI(
    "dfsan-args-abi",
    cl::desc("Use the argument ABI rather than the TLS ABI"),
    cl::Hidden);

// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
static cl::opt<bool> ClCombinePointerLabelsOnLoad(
    "dfsan-combine-pointer-labels-on-load",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "loading from memory."),
    cl::Hidden, cl::init(true));

// Controls whether the pass includes or ignores the labels of pointers in
// stores instructions.
static cl::opt<bool> ClCombinePointerLabelsOnStore(
    "dfsan-combine-pointer-labels-on-store",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "storing in memory."),
    cl::Hidden, cl::init(false));

// Debugging aid: reports (via __dfsan_nonzero_label) the first places where a
// nonzero label appears.
static cl::opt<bool> ClDebugNonzeroLabels(
    "dfsan-debug-nonzero-labels",
    cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
             "load or return with a nonzero label"),
    cl::Hidden);

// Experimental feature that inserts callbacks for certain data events.
// Currently callbacks are only inserted for loads, stores, memory transfers
// (i.e. memcpy and memmove), and comparisons.
//
// If this flag is set to true, the user must provide definitions for the
// following callback functions:
//   void __dfsan_load_callback(dfsan_label Label);
//   void __dfsan_store_callback(dfsan_label Label);
//   void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
//   void __dfsan_cmp_callback(dfsan_label CombinedLabel);
static cl::opt<bool> ClEventCallbacks(
    "dfsan-event-callbacks",
    cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
    cl::Hidden, cl::init(false));

// Use a distinct bit for each base label, enabling faster unions with less
// instrumentation. Limits the max number of base labels to 16.
static cl::opt<bool> ClFast16Labels(
    "dfsan-fast-16-labels",
    cl::desc("Use more efficient instrumentation, limiting the number of "
             "labels to 16."),
    cl::Hidden, cl::init(false));

// Returns the name of G's value type when it is a named struct type, or a
// placeholder string otherwise. Used to match the "type" category entries in
// the ABI list against global aliases.
static StringRef GetGlobalTypeString(const GlobalValue &G) {
  // Types of GlobalVariables are always pointer types.
  Type *GType = G.getValueType();
  // For now we support excluding struct types only.
  if (StructType *SGType = dyn_cast<StructType>(GType)) {
    if (!SGType->isLiteral())
      return SGType->getName();
  }
  return "<unknown type>";
}

namespace {

// Thin wrapper over a SpecialCaseList that answers "is this entity listed in
// the given category of the 'dataflow' section?" queries.
class DFSanABIList {
  std::unique_ptr<SpecialCaseList> SCL;

public:
  DFSanABIList() = default;

  // NOTE(review): the isIn() helpers below dereference SCL unconditionally,
  // so set() must be called before any query -- confirm all callers do this.
  void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }

  /// Returns whether either this function or its source file are listed in the
  /// given category.
  bool isIn(const Function &F, StringRef Category) const {
    return isIn(*F.getParent(), Category) ||
           SCL->inSection("dataflow", "fun", F.getName(), Category);
  }

  /// Returns whether this global alias is listed in the given category.
  ///
  /// If GA aliases a function, the alias's name is matched as a function name
  /// would be.
  /// Similarly, aliases of globals are matched like globals.
  bool isIn(const GlobalAlias &GA, StringRef Category) const {
    // A module-level ("src") match covers everything in the module, including
    // this alias.
    if (isIn(*GA.getParent(), Category))
      return true;

    if (isa<FunctionType>(GA.getValueType()))
      return SCL->inSection("dataflow", "fun", GA.getName(), Category);

    return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
           SCL->inSection("dataflow", "type", GetGlobalTypeString(GA),
                          Category);
  }

  /// Returns whether this module is listed in the given category.
  bool isIn(const Module &M, StringRef Category) const {
    return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
  }
};

/// TransformedFunction is used to express the result of transforming one
/// function type into another. This struct is immutable. It holds metadata
/// useful for updating calls of the old function to the new type.
struct TransformedFunction {
  TransformedFunction(FunctionType* OriginalType,
                      FunctionType* TransformedType,
                      std::vector<unsigned> ArgumentIndexMapping)
      : OriginalType(OriginalType),
        TransformedType(TransformedType),
        ArgumentIndexMapping(ArgumentIndexMapping) {}

  // Disallow copies.
  TransformedFunction(const TransformedFunction&) = delete;
  TransformedFunction& operator=(const TransformedFunction&) = delete;

  // Allow moves.
  TransformedFunction(TransformedFunction&&) = default;
  TransformedFunction& operator=(TransformedFunction&&) = default;

  /// Type of the function before the transformation.
  FunctionType *OriginalType;

  /// Type of the function after the transformation.
  FunctionType *TransformedType;

  /// Transforming a function may change the position of arguments. This
  /// member records the mapping from each argument's old position to its new
  /// position. Argument positions are zero-indexed.
  /// If the transformation
  /// from F to F' made the first argument of F into the third argument of F',
  /// then ArgumentIndexMapping[0] will equal 2.
  std::vector<unsigned> ArgumentIndexMapping;
};

/// Given function attributes from a call site for the original function,
/// return function attributes appropriate for a call to the transformed
/// function.
AttributeList TransformFunctionAttributes(
    const TransformedFunction& TransformedFunction,
    LLVMContext& Ctx, AttributeList CallSiteAttrs) {

  // Construct a vector of AttributeSet for each function argument.
  std::vector<llvm::AttributeSet> ArgumentAttributes(
      TransformedFunction.TransformedType->getNumParams());

  // Copy attributes from the parameter of the original function to the
  // transformed version. 'ArgumentIndexMapping' holds the mapping from
  // old argument position to new.
  for (unsigned i=0, ie = TransformedFunction.ArgumentIndexMapping.size();
       i < ie; ++i) {
    unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[i];
    ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(i);
  }

  // Copy annotations on varargs arguments.
  for (unsigned i = TransformedFunction.OriginalType->getNumParams(),
       ie = CallSiteAttrs.getNumAttrSets(); i<ie; ++i) {
    ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(i));
  }

  return AttributeList::get(
      Ctx,
      CallSiteAttrs.getFnAttributes(),
      CallSiteAttrs.getRetAttributes(),
      llvm::makeArrayRef(ArgumentAttributes));
}

/// Pass implementation. One instance is created per module; init() caches
/// commonly used types and constants, and runImpl() performs the actual
/// instrumentation.
class DataFlowSanitizer {
  friend struct DFSanFunction;
  friend class DFSanVisitor;

  // Each byte of application memory is backed by ShadowWidthBytes bytes of
  // shadow holding its label (see the memory layout in the file comment).
  enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 };

  /// Which ABI should be used for instrumented functions?
  enum InstrumentedABI {
    /// Argument and return value labels are passed through additional
    /// arguments and by modifying the return type.
    IA_Args,

    /// Argument and return value labels are passed through TLS variables
    /// __dfsan_arg_tls and __dfsan_retval_tls.
    IA_TLS
  };

  /// How should calls to uninstrumented functions be handled?
  enum WrapperKind {
    /// This function is present in an uninstrumented form but we don't know
    /// how it should be handled. Print a warning and call the function anyway.
    /// Don't label the return value.
    WK_Warning,

    /// This function does not write to (user-accessible) memory, and its return
    /// value is unlabelled.
    WK_Discard,

    /// This function does not write to (user-accessible) memory, and the label
    /// of its return value is the union of the label of its arguments.
    WK_Functional,

    /// Instead of calling the function, a custom wrapper __dfsw_F is called,
    /// where F is the name of the function. This function may wrap the
    /// original function or provide its own implementation. This is similar to
    /// the IA_Args ABI, except that IA_Args uses a struct return type to
    /// pass the return value shadow in a register, while WK_Custom uses an
    /// extra pointer argument to return the shadow. This allows the wrapped
    /// form of the function type to be expressed in C.
    WK_Custom
  };

  Module *Mod;
  LLVMContext *Ctx;
  /// Integer type wide enough to hold one shadow label (ShadowWidthBits).
  IntegerType *ShadowTy;
  PointerType *ShadowPtrTy;
  IntegerType *IntptrTy;
  ConstantInt *ZeroShadow;
  /// Bitmask applied to application addresses to derive shadow addresses on
  /// targets with a compile-time-known layout (see init()).
  ConstantInt *ShadowPtrMask;
  ConstantInt *ShadowPtrMul;
  Constant *ArgTLS;
  Constant *RetvalTLS;
  FunctionType *GetArgTLSTy;
  FunctionType *GetRetvalTLSTy;
  Constant *GetArgTLS;
  Constant *GetRetvalTLS;
  Constant *ExternalShadowMask;
  FunctionType *DFSanUnionFnTy;
  FunctionType *DFSanUnionLoadFnTy;
  FunctionType *DFSanUnimplementedFnTy;
  FunctionType *DFSanSetLabelFnTy;
  FunctionType *DFSanNonzeroLabelFnTy;
  FunctionType *DFSanVarargWrapperFnTy;
  FunctionType *DFSanLoadStoreCmpCallbackFnTy;
  FunctionType *DFSanMemTransferCallbackFnTy;
  FunctionCallee DFSanUnionFn;
  FunctionCallee DFSanCheckedUnionFn;
  FunctionCallee DFSanUnionLoadFn;
  FunctionCallee DFSanUnionLoadFast16LabelsFn;
  FunctionCallee DFSanUnimplementedFn;
  FunctionCallee DFSanSetLabelFn;
  FunctionCallee DFSanNonzeroLabelFn;
  FunctionCallee DFSanVarargWrapperFn;
  FunctionCallee DFSanLoadCallbackFn;
  FunctionCallee DFSanStoreCallbackFn;
  FunctionCallee DFSanMemTransferCallbackFn;
  FunctionCallee DFSanCmpCallbackFn;
  MDNode *ColdCallWeights;
  DFSanABIList ABIList;
  DenseMap<Value *, Function *> UnwrappedFnMap;
  AttrBuilder ReadOnlyNoneAttrs;
  /// True on targets (currently AArch64) where the shadow mask is only known
  /// at runtime and must be read from __dfsan_shadow_ptr_mask.
  bool DFSanRuntimeShadowMask = false;

  Value *getShadowAddress(Value *Addr, Instruction *Pos);
  bool isInstrumented(const Function *F);
  bool isInstrumented(const GlobalAlias *GA);
  FunctionType *getArgsFunctionType(FunctionType *T);
  FunctionType *getTrampolineFunctionType(FunctionType *T);
  TransformedFunction getCustomFunctionType(FunctionType *T);
  InstrumentedABI getInstrumentedABI();
  WrapperKind getWrapperKind(Function *F);
  void addGlobalNamePrefix(GlobalValue *GV);
  Function *buildWrapperFunction(Function *F, StringRef NewFName,
                                 GlobalValue::LinkageTypes NewFLink,
                                 FunctionType *NewFT);
  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
  void initializeCallbackFunctions(Module &M);
  void initializeRuntimeFunctions(Module &M);

  bool init(Module &M);

public:
  DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);

  bool runImpl(Module &M);
};

/// Per-function instrumentation state; one instance exists while a single
/// function is being instrumented.
struct DFSanFunction {
  DataFlowSanitizer &DFS;
  Function *F;
  DominatorTree DT;
  DataFlowSanitizer::InstrumentedABI IA;
  /// True when F keeps the native ABI and argument shadows are supplied
  /// directly by the caller (set for trampolines -- see
  /// getOrBuildTrampolineFunction).
  bool IsNativeABI;
  Value *ArgTLSPtr = nullptr;
  Value *RetvalTLSPtr = nullptr;
  AllocaInst *LabelReturnAlloca = nullptr;
  /// Cached shadow value for each SSA value already processed.
  DenseMap<Value *, Value *> ValShadowMap;
  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
  /// Shadow PHI nodes whose incoming shadows are filled in after all blocks
  /// have been visited.
  std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
  DenseSet<Instruction *> SkipInsts;
  std::vector<Value *> NonZeroChecks;
  bool AvoidNewBlocks;

  struct CachedCombinedShadow {
    BasicBlock *Block;
    Value *Shadow;
  };
  /// Cache of combined shadows, keyed by the pair of operand shadows.
  DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
      CachedCombinedShadows;
  DenseMap<Value *, std::set<Value *>> ShadowElements;

  DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
    DT.recalculate(*F);
    // FIXME: Need to track down the register allocator issue which causes poor
    // performance in pathological cases with large numbers of basic blocks.
    AvoidNewBlocks = F->size() > 1000;
  }

  Value *getArgTLSPtr();
  Value *getArgTLS(unsigned Index, Instruction *Pos);
  Value *getRetvalTLS();
  Value *getShadow(Value *V);
  void setShadow(Instruction *I, Value *Shadow);
  Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  Value *combineOperandShadows(Instruction *Inst);
  Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
                    Instruction *Pos);
  void storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow,
                   Instruction *Pos);
};

/// InstVisitor that emits the shadow-propagation code for each supported
/// instruction kind; instructions without a visit* overload fall through to
/// InstVisitor's default no-op.
class DFSanVisitor : public InstVisitor<DFSanVisitor> {
public:
  DFSanFunction &DFSF;

  DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}

  const DataLayout &getDataLayout() const {
    return DFSF.F->getParent()->getDataLayout();
  }

  // Combines shadow values for all of I's operands. Returns the combined shadow
  // value.
  Value *visitOperandShadowInst(Instruction &I);

  void visitUnaryOperator(UnaryOperator &UO);
  void visitBinaryOperator(BinaryOperator &BO);
  void visitCastInst(CastInst &CI);
  void visitCmpInst(CmpInst &CI);
  void visitGetElementPtrInst(GetElementPtrInst &GEPI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitReturnInst(ReturnInst &RI);
  void visitCallBase(CallBase &CB);
  void visitPHINode(PHINode &PN);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitShuffleVectorInst(ShuffleVectorInst &I);
  void visitExtractValueInst(ExtractValueInst &I);
  void visitInsertValueInst(InsertValueInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitSelectInst(SelectInst &I);
  void visitMemSetInst(MemSetInst &I);
  void visitMemTransferInst(MemTransferInst &I);
};

} // end anonymous namespace

/// Builds the ABI list from the explicitly passed file names plus any files
/// given on the command line via -dfsan-abilist.
DataFlowSanitizer::DataFlowSanitizer(
    const std::vector<std::string> &ABIListFiles) {
496 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles)); 497 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(), 498 ClABIListFiles.end()); 499 // FIXME: should we propagate vfs::FileSystem to this constructor? 500 ABIList.set( 501 SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem())); 502 } 503 504 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) { 505 SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end()); 506 ArgTypes.append(T->getNumParams(), ShadowTy); 507 if (T->isVarArg()) 508 ArgTypes.push_back(ShadowPtrTy); 509 Type *RetType = T->getReturnType(); 510 if (!RetType->isVoidTy()) 511 RetType = StructType::get(RetType, ShadowTy); 512 return FunctionType::get(RetType, ArgTypes, T->isVarArg()); 513 } 514 515 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) { 516 assert(!T->isVarArg()); 517 SmallVector<Type *, 4> ArgTypes; 518 ArgTypes.push_back(T->getPointerTo()); 519 ArgTypes.append(T->param_begin(), T->param_end()); 520 ArgTypes.append(T->getNumParams(), ShadowTy); 521 Type *RetType = T->getReturnType(); 522 if (!RetType->isVoidTy()) 523 ArgTypes.push_back(ShadowPtrTy); 524 return FunctionType::get(T->getReturnType(), ArgTypes, false); 525 } 526 527 TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) { 528 SmallVector<Type *, 4> ArgTypes; 529 530 // Some parameters of the custom function being constructed are 531 // parameters of T. Record the mapping from parameters of T to 532 // parameters of the custom function, so that parameter attributes 533 // at call sites can be updated. 
  std::vector<unsigned> ArgumentIndexMapping;
  for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {
    Type* param_type = T->getParamType(i);
    FunctionType *FT;
    // Function-pointer parameters are expanded into two wrapper parameters: a
    // pointer to a trampoline of the matching type plus an opaque i8*
    // (presumably context for the trampoline -- see
    // getOrBuildTrampolineFunction).
    if (isa<PointerType>(param_type) && (FT = dyn_cast<FunctionType>(
            cast<PointerType>(param_type)->getElementType()))) {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
      ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
    } else {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(param_type);
    }
  }
  // One shadow argument per original parameter, then the vararg shadow
  // pointer and the return-shadow out-parameter as applicable.
  for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
    ArgTypes.push_back(ShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(ShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(ShadowPtrTy);
  return TransformedFunction(
      T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
      ArgumentIndexMapping);
}

/// Caches module-level state -- commonly used types, constants and the
/// target-specific shadow mask -- used throughout instrumentation.
bool DataFlowSanitizer::init(Module &M) {
  Triple TargetTriple(M.getTargetTriple());
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;

  const DataLayout &DL = M.getDataLayout();

  Mod = &M;
  Ctx = &M.getContext();
  ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
  ShadowPtrTy = PointerType::getUnqual(ShadowTy);
  IntptrTy = DL.getIntPtrType(*Ctx);
  ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
  // Clearing the masked bits maps an application address into the shadow
  // region (see the memory layout in the file comment).
  if (IsX86_64)
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
  else if (IsMIPS64)
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
  // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
  else if (IsAArch64)
    DFSanRuntimeShadowMask = true;
  else
    report_fatal_error("unsupported triple");

  // Signatures of the runtime/callback functions declared by
  // initializeRuntimeFunctions() / initializeCallbackFunctions().
  Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
  DFSanUnionFnTy =
      FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
  Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
  DFSanUnionLoadFnTy =
      FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
  DFSanUnimplementedFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
  DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
                                        DFSanSetLabelArgs, /*isVarArg=*/false);
  DFSanNonzeroLabelFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
  DFSanVarargWrapperFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  DFSanLoadStoreCmpCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), ShadowTy, /*isVarArg=*/false);
  Type *DFSanMemTransferCallbackArgs[2] = {ShadowPtrTy, IntptrTy};
  DFSanMemTransferCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
                        /*isVarArg=*/false);

  ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
  return true;
}

/// A function is instrumented unless the ABI list marks it "uninstrumented".
bool DataFlowSanitizer::isInstrumented(const Function *F) {
  return !ABIList.isIn(*F, "uninstrumented");
}

bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
  return !ABIList.isIn(*GA, "uninstrumented");
}

DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
  return ClArgsABI ? IA_Args : IA_TLS;
}

/// Decides how a call to the uninstrumented function F is handled, based on
/// its ABI-list category; unlisted functions get WK_Warning.
DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
  if (ABIList.isIn(*F, "functional"))
    return WK_Functional;
  if (ABIList.isIn(*F, "discard"))
    return WK_Discard;
  if (ABIList.isIn(*F, "custom"))
    return WK_Custom;

  return WK_Warning;
}

/// Renames GV to "dfs$<name>", also rewriting any ".symver" directive in the
/// module inline asm that refers to the old name.
void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
  std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
  GV->setName(Prefix + GVName);

  // Try to change the name of the function in module inline asm. We only do
  // this for specific asm directives, currently only ".symver", to try to avoid
  // corrupting asm which happens to contain the symbol name as a substring.
  // Note that the substitution for .symver assumes that the versioned symbol
  // also has an instrumented name.
  std::string Asm = GV->getParent()->getModuleInlineAsm();
  std::string SearchStr = ".symver " + GVName + ",";
  size_t Pos = Asm.find(SearchStr);
  if (Pos != std::string::npos) {
    Asm.replace(Pos, SearchStr.size(),
                ".symver " + Prefix + GVName + "," + Prefix);
    GV->getParent()->setModuleInlineAsm(Asm);
  }
}

/// Creates a function named NewFName with type NewFT that forwards to F.
/// Vararg arguments cannot be forwarded, so a vararg wrapper instead calls
/// __dfsan_vararg_wrapper (passing F's name) and ends in unreachable.
Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
                                        GlobalValue::LinkageTypes NewFLink,
                                        FunctionType *NewFT) {
  FunctionType *FT = F->getFunctionType();
  Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
                                    NewFName, F->getParent());
  NewF->copyAttributesFrom(F);
  // Drop return attributes that are invalid for the (possibly changed)
  // return type.
  NewF->removeAttributes(
      AttributeList::ReturnIndex,
      AttributeFuncs::typeIncompatible(NewFT->getReturnType()));

  BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
  if (F->isVarArg()) {
    NewF->removeAttributes(AttributeList::FunctionIndex,
                           AttrBuilder().addAttribute("split-stack"));
    CallInst::Create(DFSanVarargWrapperFn,
                     IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
                     BB);
    new UnreachableInst(*Ctx, BB);
  } else {
    std::vector<Value *> Args;
    unsigned n = FT->getNumParams();
    for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
      Args.push_back(&*ai);
    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}

/// Gets or creates the trampoline named FName for functions of type FT. The
/// trampoline receives (callee, args..., arg shadows..., [ret shadow ptr]),
/// calls the callee with the argument shadows wired in via a native-ABI
/// DFSanFunction, and stores the return shadow through the final argument.
Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
                                                          StringRef FName) {
  FunctionType *FTT = getTrampolineFunctionType(FT);
  FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
  Function *F = dyn_cast<Function>(C.getCallee());
  // Only materialize the body once; linkonce_odr deduplicates identical
  // trampolines across translation units.
  if (F && F->isDeclaration()) {
    F->setLinkage(GlobalValue::LinkOnceODRLinkage);
    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    std::vector<Value *> Args;
    Function::arg_iterator AI = F->arg_begin(); ++AI;
    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
      Args.push_back(&*AI);
    CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
    ReturnInst *RI;
    if (FT->getReturnType()->isVoidTy())
      RI = ReturnInst::Create(*Ctx, BB);
    else
      RI = ReturnInst::Create(*Ctx, CI, BB);

    // Seed the shadow map with the shadow arguments so the call visitor
    // propagates them into the call.
    DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
      DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
    DFSanVisitor(DFSF).visitCallInst(*CI);
    if (!FT->getReturnType()->isVoidTy())
      new StoreInst(DFSF.getShadow(RI->getReturnValue()),
                    &*std::prev(F->arg_end()), RI);
  }

  return cast<Constant>(C.getCallee());
}

// Declares the DataFlowSanitizer runtime functions in the module and caches
// their callees, attaching the attributes the runtime guarantees (nounwind,
// readnone/readonly, zext on label arguments and returns).
void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanUnionFn =
        Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanCheckedUnionFn =
        Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFn =
        Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction(
        "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  {
    AttributeList AL;
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanSetLabelFn =
        Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);
}

// Declares the user-provided event callback functions (see -dfsan-event-
// callbacks) in the module and caches their callees.
void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
  DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
                                                 DFSanLoadStoreCmpCallbackFnTy);
  DFSanStoreCallbackFn = Mod->getOrInsertFunction(
      "__dfsan_store_callback", DFSanLoadStoreCmpCallbackFnTy);
  DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
      "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
  DFSanCmpCallbackFn = Mod->getOrInsertFunction("__dfsan_cmp_callback",
                                                DFSanLoadStoreCmpCallbackFnTy);
}

/// Instruments every function in M; returns true if the module was changed.
bool DataFlowSanitizer::runImpl(Module &M) {
  init(M);

  // Honour the "skip" category: leave the module untouched.
  if (ABIList.isIn(M, "skip"))
    return false;

  const unsigned InitialGlobalSize = M.global_size();
  const unsigned InitialModuleSize = M.size();

  bool Changed = false;

  // TLS slots used to pass argument and return-value shadows between
  // instrumented callers and callees under the IA_TLS ABI.
  Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
  ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
  if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS)) {
    Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
    G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }
  RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
  if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS)) {
    Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
    G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }

  ExternalShadowMask =
      Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);

initializeCallbackFunctions(M); 824 initializeRuntimeFunctions(M); 825 826 std::vector<Function *> FnsToInstrument; 827 SmallPtrSet<Function *, 2> FnsWithNativeABI; 828 for (Function &i : M) { 829 if (!i.isIntrinsic() && 830 &i != DFSanUnionFn.getCallee()->stripPointerCasts() && 831 &i != DFSanCheckedUnionFn.getCallee()->stripPointerCasts() && 832 &i != DFSanUnionLoadFn.getCallee()->stripPointerCasts() && 833 &i != DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts() && 834 &i != DFSanUnimplementedFn.getCallee()->stripPointerCasts() && 835 &i != DFSanSetLabelFn.getCallee()->stripPointerCasts() && 836 &i != DFSanNonzeroLabelFn.getCallee()->stripPointerCasts() && 837 &i != DFSanVarargWrapperFn.getCallee()->stripPointerCasts() && 838 &i != DFSanLoadCallbackFn.getCallee()->stripPointerCasts() && 839 &i != DFSanStoreCallbackFn.getCallee()->stripPointerCasts() && 840 &i != DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts() && 841 &i != DFSanCmpCallbackFn.getCallee()->stripPointerCasts()) 842 FnsToInstrument.push_back(&i); 843 } 844 845 // Give function aliases prefixes when necessary, and build wrappers where the 846 // instrumentedness is inconsistent. 847 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) { 848 GlobalAlias *GA = &*i; 849 ++i; 850 // Don't stop on weak. We assume people aren't playing games with the 851 // instrumentedness of overridden weak aliases. 852 if (auto F = dyn_cast<Function>(GA->getBaseObject())) { 853 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F); 854 if (GAInst && FInst) { 855 addGlobalNamePrefix(GA); 856 } else if (GAInst != FInst) { 857 // Non-instrumented alias of an instrumented function, or vice versa. 858 // Replace the alias with a native-ABI wrapper of the aliasee. The pass 859 // below will take care of instrumenting it. 
860 Function *NewF = 861 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType()); 862 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType())); 863 NewF->takeName(GA); 864 GA->eraseFromParent(); 865 FnsToInstrument.push_back(NewF); 866 } 867 } 868 } 869 870 ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly) 871 .addAttribute(Attribute::ReadNone); 872 873 // First, change the ABI of every function in the module. ABI-listed 874 // functions keep their original ABI and get a wrapper function. 875 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(), 876 e = FnsToInstrument.end(); 877 i != e; ++i) { 878 Function &F = **i; 879 FunctionType *FT = F.getFunctionType(); 880 881 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() && 882 FT->getReturnType()->isVoidTy()); 883 884 if (isInstrumented(&F)) { 885 // Instrumented functions get a 'dfs$' prefix. This allows us to more 886 // easily identify cases of mismatching ABIs. 887 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) { 888 FunctionType *NewFT = getArgsFunctionType(FT); 889 Function *NewF = Function::Create(NewFT, F.getLinkage(), 890 F.getAddressSpace(), "", &M); 891 NewF->copyAttributesFrom(&F); 892 NewF->removeAttributes( 893 AttributeList::ReturnIndex, 894 AttributeFuncs::typeIncompatible(NewFT->getReturnType())); 895 for (Function::arg_iterator FArg = F.arg_begin(), 896 NewFArg = NewF->arg_begin(), 897 FArgEnd = F.arg_end(); 898 FArg != FArgEnd; ++FArg, ++NewFArg) { 899 FArg->replaceAllUsesWith(&*NewFArg); 900 } 901 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList()); 902 903 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end(); 904 UI != UE;) { 905 BlockAddress *BA = dyn_cast<BlockAddress>(*UI); 906 ++UI; 907 if (BA) { 908 BA->replaceAllUsesWith( 909 BlockAddress::get(NewF, BA->getBasicBlock())); 910 delete BA; 911 } 912 } 913 F.replaceAllUsesWith( 914 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT))); 
915 NewF->takeName(&F); 916 F.eraseFromParent(); 917 *i = NewF; 918 addGlobalNamePrefix(NewF); 919 } else { 920 addGlobalNamePrefix(&F); 921 } 922 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) { 923 // Build a wrapper function for F. The wrapper simply calls F, and is 924 // added to FnsToInstrument so that any instrumentation according to its 925 // WrapperKind is done in the second pass below. 926 FunctionType *NewFT = getInstrumentedABI() == IA_Args 927 ? getArgsFunctionType(FT) 928 : FT; 929 930 // If the function being wrapped has local linkage, then preserve the 931 // function's linkage in the wrapper function. 932 GlobalValue::LinkageTypes wrapperLinkage = 933 F.hasLocalLinkage() 934 ? F.getLinkage() 935 : GlobalValue::LinkOnceODRLinkage; 936 937 Function *NewF = buildWrapperFunction( 938 &F, std::string("dfsw$") + std::string(F.getName()), 939 wrapperLinkage, NewFT); 940 if (getInstrumentedABI() == IA_TLS) 941 NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs); 942 943 Value *WrappedFnCst = 944 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)); 945 F.replaceAllUsesWith(WrappedFnCst); 946 947 UnwrappedFnMap[WrappedFnCst] = &F; 948 *i = NewF; 949 950 if (!F.isDeclaration()) { 951 // This function is probably defining an interposition of an 952 // uninstrumented function and hence needs to keep the original ABI. 953 // But any functions it may call need to use the instrumented ABI, so 954 // we instrument it in a mode which preserves the original ABI. 955 FnsWithNativeABI.insert(&F); 956 957 // This code needs to rebuild the iterators, as they may be invalidated 958 // by the push_back, taking care that the new range does not include 959 // any functions added by this code. 
960 size_t N = i - FnsToInstrument.begin(), 961 Count = e - FnsToInstrument.begin(); 962 FnsToInstrument.push_back(&F); 963 i = FnsToInstrument.begin() + N; 964 e = FnsToInstrument.begin() + Count; 965 } 966 // Hopefully, nobody will try to indirectly call a vararg 967 // function... yet. 968 } else if (FT->isVarArg()) { 969 UnwrappedFnMap[&F] = &F; 970 *i = nullptr; 971 } 972 } 973 974 for (Function *i : FnsToInstrument) { 975 if (!i || i->isDeclaration()) 976 continue; 977 978 removeUnreachableBlocks(*i); 979 980 DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i)); 981 982 // DFSanVisitor may create new basic blocks, which confuses df_iterator. 983 // Build a copy of the list before iterating over it. 984 SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock())); 985 986 for (BasicBlock *i : BBList) { 987 Instruction *Inst = &i->front(); 988 while (true) { 989 // DFSanVisitor may split the current basic block, changing the current 990 // instruction's next pointer and moving the next instruction to the 991 // tail block from which we should continue. 992 Instruction *Next = Inst->getNextNode(); 993 // DFSanVisitor may delete Inst, so keep track of whether it was a 994 // terminator. 995 bool IsTerminator = Inst->isTerminator(); 996 if (!DFSF.SkipInsts.count(Inst)) 997 DFSanVisitor(DFSF).visit(Inst); 998 if (IsTerminator) 999 break; 1000 Inst = Next; 1001 } 1002 } 1003 1004 // We will not necessarily be able to compute the shadow for every phi node 1005 // until we have visited every block. Therefore, the code that handles phi 1006 // nodes adds them to the PHIFixups list so that they can be properly 1007 // handled here. 
1008 for (std::vector<std::pair<PHINode *, PHINode *>>::iterator 1009 i = DFSF.PHIFixups.begin(), 1010 e = DFSF.PHIFixups.end(); 1011 i != e; ++i) { 1012 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n; 1013 ++val) { 1014 i->second->setIncomingValue( 1015 val, DFSF.getShadow(i->first->getIncomingValue(val))); 1016 } 1017 } 1018 1019 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy 1020 // places (i.e. instructions in basic blocks we haven't even begun visiting 1021 // yet). To make our life easier, do this work in a pass after the main 1022 // instrumentation. 1023 if (ClDebugNonzeroLabels) { 1024 for (Value *V : DFSF.NonZeroChecks) { 1025 Instruction *Pos; 1026 if (Instruction *I = dyn_cast<Instruction>(V)) 1027 Pos = I->getNextNode(); 1028 else 1029 Pos = &DFSF.F->getEntryBlock().front(); 1030 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos)) 1031 Pos = Pos->getNextNode(); 1032 IRBuilder<> IRB(Pos); 1033 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow); 1034 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen( 1035 Ne, Pos, /*Unreachable=*/false, ColdCallWeights)); 1036 IRBuilder<> ThenIRB(BI); 1037 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {}); 1038 } 1039 } 1040 } 1041 1042 return Changed || !FnsToInstrument.empty() || 1043 M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize; 1044 } 1045 1046 Value *DFSanFunction::getArgTLSPtr() { 1047 if (ArgTLSPtr) 1048 return ArgTLSPtr; 1049 if (DFS.ArgTLS) 1050 return ArgTLSPtr = DFS.ArgTLS; 1051 1052 IRBuilder<> IRB(&F->getEntryBlock().front()); 1053 return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLSTy, DFS.GetArgTLS, {}); 1054 } 1055 1056 Value *DFSanFunction::getRetvalTLS() { 1057 if (RetvalTLSPtr) 1058 return RetvalTLSPtr; 1059 if (DFS.RetvalTLS) 1060 return RetvalTLSPtr = DFS.RetvalTLS; 1061 1062 IRBuilder<> IRB(&F->getEntryBlock().front()); 1063 return RetvalTLSPtr = 1064 IRB.CreateCall(DFS.GetRetvalTLSTy, DFS.GetRetvalTLS, {}); 1065 } 1066 
// Returns a pointer to the Idx'th slot of the argument-shadow TLS array,
// emitting the GEP before Pos.
Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
  IRBuilder<> IRB(Pos);
  return IRB.CreateConstGEP2_64(ArrayType::get(DFS.ShadowTy, 64),
                                getArgTLSPtr(), 0, Idx);
}

// Returns the shadow value for V, creating and caching it on first use.
// Constants (anything that is neither an Argument nor an Instruction) always
// carry the zero shadow.
Value *DFSanFunction::getShadow(Value *V) {
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.ZeroShadow;
  Value *&Shadow = ValShadowMap[V];
  if (!Shadow) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      // Native-ABI functions receive no shadow for their arguments.
      if (IsNativeABI)
        return DFS.ZeroShadow;
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        // Load the argument's label from the TLS array, at the top of the
        // entry block (or right after the runtime call that fetched the
        // array base).
        Value *ArgTLSPtr = getArgTLSPtr();
        Instruction *ArgTLSPos =
            DFS.ArgTLS ? &*F->getEntryBlock().begin()
                       : cast<Instruction>(ArgTLSPtr)->getNextNode();
        IRBuilder<> IRB(ArgTLSPos);
        Shadow =
            IRB.CreateLoad(DFS.ShadowTy, getArgTLS(A->getArgNo(), ArgTLSPos));
        break;
      }
      case DataFlowSanitizer::IA_Args: {
        // Under the args ABI the second half of the parameter list carries
        // the shadows: shadow of arg N is parameter N + (arg count / 2).
        unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
        Function::arg_iterator i = F->arg_begin();
        while (ArgIdx--)
          ++i;
        Shadow = &*i;
        assert(Shadow->getType() == DFS.ShadowTy);
        break;
      }
      }
      NonZeroChecks.push_back(Shadow);
    } else {
      Shadow = DFS.ZeroShadow;
    }
  }
  return Shadow;
}

// Records Shadow as the label of instruction I.  Each instruction's shadow
// may be set exactly once.
void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
  assert(!ValShadowMap.count(I));
  assert(Shadow->getType() == DFS.ShadowTy);
  ValShadowMap[I] = Shadow;
}

// Computes the shadow address corresponding to application address Addr,
// inserting the address arithmetic before Pos: mask off the high bits, then
// multiply by the shadow width (see the memory-layout comment at the top of
// this file).
Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
  assert(Addr != RetvalTLS && "Reinstrumenting?");
  IRBuilder<> IRB(Pos);
  Value *ShadowPtrMaskValue;
  // With -dfsan-runtime-shadow-mask the mask is provided by the runtime via
  // an external global instead of being a compile-time constant.
  if (DFSanRuntimeShadowMask)
    ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
  else
    ShadowPtrMaskValue = ShadowPtrMask;
  return IRB.CreateIntToPtr(
      IRB.CreateMul(
          IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
          ShadowPtrMul),
      ShadowPtrTy);
}

// Generates IR to compute the union of the two given shadows, inserting it
// before Pos.  Returns the computed union Value.
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
  // Trivial cases: union with zero, or union of a value with itself.
  if (V1 == DFS.ZeroShadow)
    return V2;
  if (V2 == DFS.ZeroShadow)
    return V1;
  if (V1 == V2)
    return V1;

  // If we know the label sets each shadow represents, and one set subsumes
  // the other, the union is just the larger shadow — no IR needed.
  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return V1;
    } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                             V1Elems->second.begin(), V1Elems->second.end())) {
      return V2;
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return V1;
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return V2;
  }

  // Canonicalize the pair ordering so (A,B) and (B,A) share a cache entry.
  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
  // Reuse a previously computed union only if its defining block dominates
  // the insertion point.
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  IRBuilder<> IRB(Pos);
  if (ClFast16Labels) {
    // Fast16 mode: labels are bit sets, so union is a plain OR.
    CCS.Block = Pos->getParent();
    CCS.Shadow = IRB.CreateOr(V1, V2);
  } else if (AvoidNewBlocks) {
    // Call the checked-union runtime helper rather than splitting the CFG.
    CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    CCS.Block = Pos->getParent();
    CCS.Shadow = Call;
  } else {
    // Emit an inline fast path: only call __dfsan_union when the two labels
    // actually differ, merging the result with a phi in the tail block.
    BasicBlock *Head = Pos->getParent();
    Value *Ne = IRB.CreateICmpNE(V1, V2);
    BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
        Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
    IRBuilder<> ThenIRB(BI);
    CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    BasicBlock *Tail = BI->getSuccessor(0);
    PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Phi->addIncoming(Call, Call->getParent());
    Phi->addIncoming(V1, Head); // V1 == V2 on this edge, so either works.

    CCS.Block = Tail;
    CCS.Shadow = Phi;
  }

  // Record the element set of the new union for future subsumption checks.
  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}

// A convenience function which folds the shadows of each of the operands
// of the provided instruction Inst, inserting the IR before Inst.  Returns
// the computed union Value.
Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
  if (Inst->getNumOperands() == 0)
    return DFS.ZeroShadow;

  Value *Shadow = getShadow(Inst->getOperand(0));
  for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
    Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
  }
  return Shadow;
}

// Default visitor behavior: the shadow of an instruction is the union of the
// shadows of its operands.
Value *DFSanVisitor::visitOperandShadowInst(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
  return CombinedShadow;
}

// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
// Addr has alignment Align, and take the union of each of those shadows.
Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
                                 Instruction *Pos) {
  // Promoted allocas keep their shadow in a dedicated shadow alloca; just
  // load it directly.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto i = AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      return IRB.CreateLoad(DFS.ShadowTy, i->second);
    }
  }

  const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
  // If every underlying object is constant memory (functions, block
  // addresses, constant globals), the data can never be tainted.
  SmallVector<const Value *, 2> Objs;
  getUnderlyingObjects(Addr, Objs);
  bool AllConstants = true;
  for (const Value *Obj : Objs) {
    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
      continue;
    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
      continue;

    AllConstants = false;
    break;
  }
  if (AllConstants)
    return DFS.ZeroShadow;

  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  // Small fixed sizes get straight-line code with no runtime call.
  switch (Size) {
  case 0:
    return DFS.ZeroShadow;
  case 1: {
    LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
    LI->setAlignment(ShadowAlign);
    return LI;
  }
  case 2: {
    IRBuilder<> IRB(Pos);
    Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
                                       ConstantInt::get(DFS.IntptrTy, 1));
    return combineShadows(
        IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr, ShadowAlign),
        IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
  }
  }

  if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0) {
    // First OR all the WideShadows, then OR individual shadows within the
    // combined WideShadow.  This is fewer instructions than ORing shadows
    // individually.
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *CombinedWideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidthBits) {
      WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                               ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow =
          IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
      CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
    }
    // Fold the 64-bit OR down to a single shadow-width value by repeatedly
    // ORing the high half onto the low half.
    for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
      Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
      CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
    }
    return IRB.CreateTrunc(CombinedWideShadow, DFS.ShadowTy);
  }
  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) {
    // Fast path for the common case where each byte has identical shadow:
    // load shadow 64 bits at a time, fall out to a __dfsan_union_load call
    // if any shadow is non-equal.
    BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
    IRBuilder<> FallbackIRB(FallbackBB);
    CallInst *FallbackCall = FallbackIRB.CreateCall(
        DFS.DFSanUnionLoadFn,
        {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
    FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);

    // Compare each of the shadows stored in the loaded 64 bits to each other,
    // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *WideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
    Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
    Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
    Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
    Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);

    BasicBlock *Head = Pos->getParent();
    BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());

    // Keep the dominator tree in sync with the manual CFG surgery: Tail
    // takes over Head's children, then is dominated by Head.
    if (DomTreeNode *OldNode = DT.getNode(Head)) {
      std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());

      DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
      for (auto Child : Children)
        DT.changeImmediateDominator(Child, NewNode);
    }

    // In the following code LastBr will refer to the previous basic block's
    // conditional branch instruction, whose true successor is fixed up to
    // point to the next block during the loop below or to the tail after the
    // final iteration.
    BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
    ReplaceInstWithInst(Head->getTerminator(), LastBr);
    DT.addNewBlock(FallbackBB, Head);

    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidthBits) {
      BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
      DT.addNewBlock(NextBB, LastBr->getParent());
      IRBuilder<> NextIRB(NextBB);
      WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                                   ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow = NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(),
                                                        WideAddr, ShadowAlign);
      // Each subsequent word must equal the first; otherwise fall back.
      ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
      LastBr->setSuccessor(0, NextBB);
      LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
    }

    LastBr->setSuccessor(0, Tail);
    FallbackIRB.CreateBr(Tail);
    PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Shadow->addIncoming(FallbackCall, FallbackBB);
    Shadow->addIncoming(TruncShadow, LastBr->getParent());
    return Shadow;
  }

  // General case: delegate to the runtime union-load helper.
  IRBuilder<> IRB(Pos);
  FunctionCallee &UnionLoadFn =
      ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
  CallInst *FallbackCall = IRB.CreateCall(
      UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  return FallbackCall;
}

// A load's shadow is the union of the loaded bytes' shadows (optionally
// combined with the pointer's own shadow).
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
  auto &DL = LI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(LI.getType());
  if (Size == 0) {
    DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
    return;
  }

  Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
  Value *Shadow =
      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
  if (ClCombinePointerLabelsOnLoad) {
    Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
  }
  if (Shadow != DFSF.DFS.ZeroShadow)
    DFSF.NonZeroChecks.push_back(Shadow);

  DFSF.setShadow(&LI, Shadow);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&LI);
    IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, Shadow);
  }
}

// Generates IR to store Shadow for each of the bytes [Addr, Addr+Size).
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
                                Value *Shadow, Instruction *Pos) {
  // Promoted allocas: store into the dedicated shadow alloca instead of
  // shadow memory.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto i = AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      IRB.CreateStore(Shadow, i->second);
      return;
    }
  }

  const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
  IRBuilder<> IRB(Pos);
  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  if (Shadow == DFS.ZeroShadow) {
    // Zeroing: one wide store of Size * ShadowWidthBits zero bits.
    IntegerType *ShadowTy =
        IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
    Value *ExtShadowAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
    return;
  }

  // Splat the shadow into a 128-bit vector and store vector-at-a-time while
  // enough bytes remain; finish with scalar stores.
  const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
  uint64_t Offset = 0;
  if (Size >= ShadowVecSize) {
    auto *ShadowVecTy = FixedVectorType::get(DFS.ShadowTy, ShadowVecSize);
    Value *ShadowVec = UndefValue::get(ShadowVecTy);
    for (unsigned i = 0; i != ShadowVecSize; ++i) {
      ShadowVec = IRB.CreateInsertElement(
          ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
    }
    Value *ShadowVecAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
    do {
      Value *CurShadowVecAddr =
          IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
      IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
      Size -= ShadowVecSize;
      ++Offset;
    } while (Size >= ShadowVecSize);
    // Convert vector-granularity offset back to element granularity for the
    // scalar tail loop below.
    Offset *= ShadowVecSize;
  }
  while (Size > 0) {
    Value *CurShadowAddr =
        IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
    IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
    --Size;
    ++Offset;
  }
}

// A store propagates the stored value's shadow (optionally unioned with the
// pointer's shadow) to the target bytes' shadow memory.
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
  auto &DL = SI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
  if (Size == 0)
    return;

  const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);

  Value* Shadow = DFSF.getShadow(SI.getValueOperand());
  if (ClCombinePointerLabelsOnStore) {
    Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
  }
  DFSF.storeShadow(SI.getPointerOperand(), Size, Alignment, Shadow, &SI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&SI);
    IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, Shadow);
  }
}

// The instructions below all propagate taint as the plain union of their
// operands' shadows.
void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
  visitOperandShadowInst(UO);
}

void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
  visitOperandShadowInst(BO);
}

void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }

void DFSanVisitor::visitCmpInst(CmpInst &CI) {
  Value *CombinedShadow = visitOperandShadowInst(CI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&CI);
    IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
  }
}

void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  visitOperandShadowInst(GEPI);
}

void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
  visitOperandShadowInst(I);
}

// An alloca whose address is only ever loaded from / stored to gets a
// companion shadow alloca so its label can be tracked without touching
// shadow memory.  The alloca's own value (the address) has zero shadow.
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
  bool AllLoadsStores = true;
  for (User *U : I.users()) {
    if (isa<LoadInst>(U))
      continue;

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Storing *to* the alloca is fine; storing its address escapes it.
      if (SI->getPointerOperand() == &I)
        continue;
    }

    AllLoadsStores = false;
    break;
  }
  if (AllLoadsStores) {
    IRBuilder<> IRB(&I);
    DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
  }
  DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
}

// A select's shadow mirrors the select itself: pick between the operand
// shadows on the same condition, then union in the condition's shadow.
// Vector conditions can't be mirrored with a scalar select, so fall back to
// unioning everything.
void DFSanVisitor::visitSelectInst(SelectInst &I) {
  Value *CondShadow = DFSF.getShadow(I.getCondition());
  Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
  Value *FalseShadow = DFSF.getShadow(I.getFalseValue());

  if (isa<VectorType>(I.getCondition()->getType())) {
    DFSF.setShadow(
        &I,
        DFSF.combineShadows(
            CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
  } else {
    Value *ShadowSel;
    if (TrueShadow == FalseShadow) {
      ShadowSel = TrueShadow;
    } else {
      ShadowSel =
          SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
    }
    DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
  }
}

// memset: let the runtime set the label of the destination range to the
// shadow of the byte value being stored.
void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
  IRBuilder<> IRB(&I);
  Value *ValShadow = DFSF.getShadow(I.getValue());
  IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
                 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
                                                                *DFSF.DFS.Ctx)),
                  IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}

// memcpy/memmove: emit a parallel transfer of the shadow bytes, scaled by
// the shadow width, using the same intrinsic as the original transfer.
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);
  Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  Value *LenShadow =
      IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
                                                    DFSF.DFS.ShadowWidthBytes));
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  auto *MTI = cast<MemTransferInst>(
      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
  if (ClPreserveAlignment) {
    MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
    MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
  } else {
    MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
    MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
  }
  if (ClEventCallbacks) {
    IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
                   {RawDestShadow, I.getLength()});
  }
}

// Return: pass the return value's shadow back to the caller, either through
// the retval TLS slot (TLS ABI) or packed into an aggregate return value
// (args ABI).
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      IRB.CreateStore(S, DFSF.getRetvalTLS());
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}

// Calls/invokes: dispatch on the callee's wrapper kind, handle the custom-
// wrapper protocol, and marshal argument/return shadows through the active
// ABI.
void DFSanVisitor::visitCallBase(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
    visitOperandShadowInst(CB);
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
    return;

  IRBuilder<> IRB(&CB);

  DenseMap<Value *, Function *>::iterator i =
      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
  if (i != DFSF.DFS.UnwrappedFnMap.end()) {
    Function *F = i->second;
    switch (DFSF.DFS.getWrapperKind(F)) {
    case DataFlowSanitizer::WK_Warning:
      CB.setCalledFunction(F);
      IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                     IRB.CreateGlobalStringPtr(F->getName()));
      DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Discard:
      CB.setCalledFunction(F);
      DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Functional:
      CB.setCalledFunction(F);
      visitOperandShadowInst(CB);
      return;
    case DataFlowSanitizer::WK_Custom:
      // Don't try to handle invokes of custom functions, it's too
      // complicated.  Instead, invoke the dfsw$ wrapper, which will in turn
      // call the __dfsw_ wrapper.
      if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
        FunctionType *FT = F->getFunctionType();
        TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
        std::string CustomFName = "__dfsw_";
        CustomFName += F->getName();
        FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
            CustomFName, CustomFn.TransformedType);
        if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
          CustomFn->copyAttributesFrom(F);

          // Custom functions returning non-void will write to the return
          // label.
          if (!FT->getReturnType()->isVoidTy()) {
            CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                       DFSF.DFS.ReadOnlyNoneAttrs);
          }
        }

        std::vector<Value *> Args;

        // Pass 1: the original arguments, with function-pointer arguments
        // replaced by (trampoline, opaque context) pairs.
        auto i = CB.arg_begin();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
          Type *T = (*i)->getType();
          FunctionType *ParamFT;
          if (isa<PointerType>(T) &&
              (ParamFT = dyn_cast<FunctionType>(
                   cast<PointerType>(T)->getElementType()))) {
            std::string TName = "dfst";
            TName += utostr(FT->getNumParams() - n);
            TName += "$";
            TName += F->getName();
            Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
            Args.push_back(T);
            Args.push_back(
                IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
          } else {
            Args.push_back(*i);
          }
        }

        // Pass 2: one shadow argument per formal parameter.
        i = CB.arg_begin();
        const unsigned ShadowArgStart = Args.size();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
          Args.push_back(DFSF.getShadow(*i));

        if (FT->isVarArg()) {
          // Variadic shadows travel through a stack array; pass its base.
          auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
                                           CB.arg_size() - FT->getNumParams());
          auto *LabelVAAlloca = new AllocaInst(
              LabelVATy, getDataLayout().getAllocaAddrSpace(),
              "labelva", &DFSF.F->getEntryBlock().front());

          for (unsigned n = 0; i != CB.arg_end(); ++i, ++n) {
            auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
            IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
          }

          Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
        }

        if (!FT->getReturnType()->isVoidTy()) {
          // The custom function writes the return label through this
          // out-parameter; the alloca is shared across all custom calls in
          // the function.
          if (!DFSF.LabelReturnAlloca) {
            DFSF.LabelReturnAlloca =
                new AllocaInst(DFSF.DFS.ShadowTy,
                               getDataLayout().getAllocaAddrSpace(),
                               "labelreturn", &DFSF.F->getEntryBlock().front());
          }
          Args.push_back(DFSF.LabelReturnAlloca);
        }

        // Finally, forward any variadic arguments themselves.
        for (i = CB.arg_begin() + FT->getNumParams(); i != CB.arg_end(); ++i)
          Args.push_back(*i);

        CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
        CustomCI->setCallingConv(CI->getCallingConv());
        CustomCI->setAttributes(TransformFunctionAttributes(CustomFn,
            CI->getContext(), CI->getAttributes()));

        // Update the parameter attributes of the custom call instruction to
        // zero extend the shadow parameters.  This is required for targets
        // which consider ShadowTy an illegal type.
        for (unsigned n = 0; n < FT->getNumParams(); n++) {
          const unsigned ArgNo = ShadowArgStart + n;
          if (CustomCI->getArgOperand(ArgNo)->getType() == DFSF.DFS.ShadowTy)
            CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
        }

        if (!FT->getReturnType()->isVoidTy()) {
          LoadInst *LabelLoad =
              IRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.LabelReturnAlloca);
          DFSF.setShadow(CustomCI, LabelLoad);
        }

        CI->replaceAllUsesWith(CustomCI);
        CI->eraseFromParent();
        return;
      }
      break;
    }
  }

  FunctionType *FT = CB.getFunctionType();
  // TLS ABI: spill each argument's shadow into the arg TLS array before the
  // call.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
      IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
                      DFSF.getArgTLS(i, &CB));
    }
  }

  // Find the instruction before which the return shadow must be
  // materialized: after a call, or at the (possibly split) normal
  // destination of an invoke.
  Instruction *Next = nullptr;
  if (!CB.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CB.getIterator() != CB.getParent()->end());
      Next = CB.getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      IRBuilder<> NextIRB(Next);
      LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.getRetvalTLS());
      DFSF.SkipInsts.insert(LI);
      DFSF.setShadow(&CB, LI);
      DFSF.NonZeroChecks.push_back(LI);
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
    std::vector<Value *> Args;

    auto i = CB.arg_begin(), E = CB.arg_end();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(*i);

    i = CB.arg_begin();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(DFSF.getShadow(*i));

    if (FT->isVarArg()) {
      unsigned VarArgSize = CB.arg_size() - FT->getNumParams();
      ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
      for (unsigned n = 0; i != E; ++i, ++n) {
        IRB.CreateStore(
            DFSF.getShadow(*i),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
        Args.push_back(*i);
      }
    }

    CallBase *NewCB;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    } else {
      NewCB = IRB.CreateCall(NewFT, Func, Args);
    }
    NewCB->setCallingConv(CB.getCallingConv());
    NewCB->setAttributes(CB.getAttributes().removeAttributes(
        *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
        AttributeFuncs::typeIncompatible(NewCB->getType())));

    if (Next) {
      // Unpack the {value, shadow} aggregate returned by the args-ABI callee.
      ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
      DFSF.SkipInsts.insert(ExVal);
      ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "",
                                                            Next);
      DFSF.SkipInsts.insert(ExShadow);
      DFSF.setShadow(ExVal, ExShadow);
      DFSF.NonZeroChecks.push_back(ExShadow);

      CB.replaceAllUsesWith(ExVal);
    }

    CB.eraseFromParent();
  }
}

// Propagate labels through a PHI by creating a parallel shadow PHI. The
// incoming shadow values are not known yet when the PHI is visited, so undef
// placeholders are installed and the (PN, ShadowPN) pair is recorded on
// PHIFixups so the real operands can be filled in afterwards.
// NOTE(review): the fixup loop itself is outside this view.
void DFSanVisitor::visitPHINode(PHINode &PN) {
  PHINode *ShadowPN =
      PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);

  // Give the shadow phi node valid predecessors to fool SplitEdge into working.
  Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
  for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
       ++i) {
    ShadowPN->addIncoming(UndefShadow, *i);
  }

  DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
  DFSF.setShadow(&PN, ShadowPN);
}

namespace {
// Legacy pass-manager wrapper around the DataFlowSanitizer transformation.
class DataFlowSanitizerLegacyPass : public ModulePass {
private:
  // Paths of the ABI list files forwarded to DataFlowSanitizer.
  std::vector<std::string> ABIListFiles;

public:
  static char ID;

  DataFlowSanitizerLegacyPass(
      const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
      : ModulePass(ID), ABIListFiles(ABIListFiles) {}

  // Returns true iff the module was modified.
  bool runOnModule(Module &M) override {
    return DataFlowSanitizer(ABIListFiles).runImpl(M);
  }
};
} // namespace

char DataFlowSanitizerLegacyPass::ID;

INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)

// Factory used by clients of the legacy pass manager.
ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
    const std::vector<std::string> &ABIListFiles) {
  return new DataFlowSanitizerLegacyPass(ABIListFiles);
}

// New pass-manager entry point: invalidate all analyses if the module was
// instrumented, otherwise preserve everything.
PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
                                             ModuleAnalysisManager &AM) {
  if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}