//===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
/// analysis.
///
/// Unlike other Sanitizer tools, this tool is not designed to detect a specific
/// class of bugs on its own. Instead, it provides a generic dynamic data flow
/// analysis framework to be used by clients to help detect application-specific
/// issues within their own code.
///
/// The analysis is based on automatic propagation of data flow labels (also
/// known as taint labels) through a program as it performs computation. Each
/// byte of application memory is backed by two bytes of shadow memory which
/// hold the label. On Linux/x86_64, memory is laid out as follows:
///
/// +--------------------+ 0x800000000000 (top of memory)
/// | application memory |
/// +--------------------+ 0x700000008000 (kAppAddr)
/// |                    |
/// |       unused       |
/// |                    |
/// +--------------------+ 0x200200000000 (kUnusedAddr)
/// |    union table     |
/// +--------------------+ 0x200000000000 (kUnionTableAddr)
/// |   shadow memory    |
/// +--------------------+ 0x000000010000 (kShadowAddr)
/// | reserved by kernel |
/// +--------------------+ 0x000000000000
///
/// To derive a shadow memory address from an application memory address,
/// bits 44-46 are cleared to bring the address into the range
/// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
/// account for the double byte representation of shadow labels and move the
/// address into the shadow memory range. See the function
/// DataFlowSanitizer::getShadowAddress below.
///
/// For more information, please refer to the design document:
/// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer
// the runtime will set the external mask based on the VMA range.
// (Resolved by the DFSan runtime; see DFSanRuntimeShadowMask below.)
static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask";

// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct. For example,
// if the input IR contains a load with alignment 8, this flag will cause
// the shadow load to have alignment 16. This flag is disabled by default as
// we have unfortunately encountered too much code (including Clang itself;
// see PR14291) which performs misaligned access.
static cl::opt<bool> ClPreserveAlignment(
    "dfsan-preserve-alignment",
    cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
    cl::init(false));

// The ABI list files control how shadow parameters are passed. The pass treats
// every function labelled "uninstrumented" in the ABI list file as conforming
// to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
// additional annotations for those functions, a call to one of those functions
// will produce a warning message, as the labelling behaviour of the function is
// unknown. The other supported annotations are "functional" and "discard",
// which are described below under DataFlowSanitizer::WrapperKind.
static cl::list<std::string> ClABIListFiles(
    "dfsan-abilist",
    cl::desc("File listing native ABI functions and how the pass treats them"),
    cl::Hidden);

// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool> ClArgsABI(
    "dfsan-args-abi",
    cl::desc("Use the argument ABI rather than the TLS ABI"),
    cl::Hidden);

// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
static cl::opt<bool> ClCombinePointerLabelsOnLoad(
    "dfsan-combine-pointer-labels-on-load",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "loading from memory."),
    cl::Hidden, cl::init(true));

// Controls whether the pass includes or ignores the labels of pointers in
// stores instructions.
static cl::opt<bool> ClCombinePointerLabelsOnStore(
    "dfsan-combine-pointer-labels-on-store",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "storing in memory."),
    cl::Hidden, cl::init(false));

// Debugging aid: report (via __dfsan_nonzero_label) whenever a tainted value
// is observed at a parameter, load or return.
static cl::opt<bool> ClDebugNonzeroLabels(
    "dfsan-debug-nonzero-labels",
    cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
             "load or return with a nonzero label"),
    cl::Hidden);

// Experimental feature that inserts callbacks for certain data events.
// Currently callbacks are only inserted for loads, stores, memory transfers
// (i.e. memcpy and memmove), and comparisons.
//
// If this flag is set to true, the user must provide definitions for the
// following callback functions:
//   void __dfsan_load_callback(dfsan_label Label, void* addr);
//   void __dfsan_store_callback(dfsan_label Label, void* addr);
//   void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
//   void __dfsan_cmp_callback(dfsan_label CombinedLabel);
static cl::opt<bool> ClEventCallbacks(
    "dfsan-event-callbacks",
    cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
    cl::Hidden, cl::init(false));

// Use a distinct bit for each base label, enabling faster unions with less
// instrumentation. Limits the max number of base labels to 16.
static cl::opt<bool> ClFast16Labels(
    "dfsan-fast-16-labels",
    cl::desc("Use more efficient instrumentation, limiting the number of "
             "labels to 16."),
    cl::Hidden, cl::init(false));

// Controls whether the pass tracks the control flow of select instructions.
static cl::opt<bool> ClTrackSelectControlFlow(
    "dfsan-track-select-control-flow",
    cl::desc("Propagate labels from condition values of select instructions "
             "to results."),
    cl::Hidden, cl::init(true));

// Returns the name of G's value type if it is a named (non-literal) struct;
// used to match the "type" entries of the ABI list. Any other type yields the
// sentinel "<unknown type>".
static StringRef GetGlobalTypeString(const GlobalValue &G) {
  // Types of GlobalVariables are always pointer types.
  Type *GType = G.getValueType();
  // For now we support excluding struct types only.
  if (StructType *SGType = dyn_cast<StructType>(GType)) {
    if (!SGType->isLiteral())
      return SGType->getName();
  }
  return "<unknown type>";
}

namespace {

// Thin wrapper around a SpecialCaseList ("dataflow" section) used to query
// how the ABI list categorises functions, globals and source files.
class DFSanABIList {
  std::unique_ptr<SpecialCaseList> SCL;

public:
  DFSanABIList() = default;

  // Takes ownership of the parsed special-case list. Must be called before
  // any isIn() query (the queries dereference SCL unconditionally).
  void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }

  /// Returns whether either this function or its source file are listed in the
  /// given category.
219 bool isIn(const Function &F, StringRef Category) const { 220 return isIn(*F.getParent(), Category) || 221 SCL->inSection("dataflow", "fun", F.getName(), Category); 222 } 223 224 /// Returns whether this global alias is listed in the given category. 225 /// 226 /// If GA aliases a function, the alias's name is matched as a function name 227 /// would be. Similarly, aliases of globals are matched like globals. 228 bool isIn(const GlobalAlias &GA, StringRef Category) const { 229 if (isIn(*GA.getParent(), Category)) 230 return true; 231 232 if (isa<FunctionType>(GA.getValueType())) 233 return SCL->inSection("dataflow", "fun", GA.getName(), Category); 234 235 return SCL->inSection("dataflow", "global", GA.getName(), Category) || 236 SCL->inSection("dataflow", "type", GetGlobalTypeString(GA), 237 Category); 238 } 239 240 /// Returns whether this module is listed in the given category. 241 bool isIn(const Module &M, StringRef Category) const { 242 return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category); 243 } 244 }; 245 246 /// TransformedFunction is used to express the result of transforming one 247 /// function type into another. This struct is immutable. It holds metadata 248 /// useful for updating calls of the old function to the new type. 249 struct TransformedFunction { 250 TransformedFunction(FunctionType* OriginalType, 251 FunctionType* TransformedType, 252 std::vector<unsigned> ArgumentIndexMapping) 253 : OriginalType(OriginalType), 254 TransformedType(TransformedType), 255 ArgumentIndexMapping(ArgumentIndexMapping) {} 256 257 // Disallow copies. 258 TransformedFunction(const TransformedFunction&) = delete; 259 TransformedFunction& operator=(const TransformedFunction&) = delete; 260 261 // Allow moves. 262 TransformedFunction(TransformedFunction&&) = default; 263 TransformedFunction& operator=(TransformedFunction&&) = default; 264 265 /// Type of the function before the transformation. 
  FunctionType *OriginalType;

  /// Type of the function after the transformation.
  FunctionType *TransformedType;

  /// Transforming a function may change the position of arguments. This
  /// member records the mapping from each argument's old position to its new
  /// position. Argument positions are zero-indexed. If the transformation
  /// from F to F' made the first argument of F into the third argument of F',
  /// then ArgumentIndexMapping[0] will equal 2.
  std::vector<unsigned> ArgumentIndexMapping;
};

/// Given function attributes from a call site for the original function,
/// return function attributes appropriate for a call to the transformed
/// function.
AttributeList TransformFunctionAttributes(
    const TransformedFunction &TransformedFunction, LLVMContext &Ctx,
    AttributeList CallSiteAttrs) {

  // Construct a vector of AttributeSet for each function argument.
  std::vector<llvm::AttributeSet> ArgumentAttributes(
      TransformedFunction.TransformedType->getNumParams());

  // Copy attributes from the parameter of the original function to the
  // transformed version. 'ArgumentIndexMapping' holds the mapping from
  // old argument position to new.
  for (unsigned i = 0, ie = TransformedFunction.ArgumentIndexMapping.size();
       i < ie; ++i) {
    unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[i];
    ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(i);
  }

  // Copy annotations on varargs arguments.
  // Varargs have no fixed position in the transformed type; append their
  // attribute sets after the mapped fixed parameters.
  for (unsigned i = TransformedFunction.OriginalType->getNumParams(),
                ie = CallSiteAttrs.getNumAttrSets();
       i < ie; ++i) {
    ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(i));
  }

  return AttributeList::get(Ctx, CallSiteAttrs.getFnAttributes(),
                            CallSiteAttrs.getRetAttributes(),
                            llvm::makeArrayRef(ArgumentAttributes));
}

// Main pass implementation: holds module-wide state (runtime function
// declarations, shadow types/constants, the parsed ABI list) shared by the
// per-function instrumentation in DFSanFunction/DFSanVisitor.
class DataFlowSanitizer {
  friend struct DFSanFunction;
  friend class DFSanVisitor;

  // Each byte of application memory maps to ShadowWidthBytes bytes of shadow.
  enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 };

  /// Which ABI should be used for instrumented functions?
  enum InstrumentedABI {
    /// Argument and return value labels are passed through additional
    /// arguments and by modifying the return type.
    IA_Args,

    /// Argument and return value labels are passed through TLS variables
    /// __dfsan_arg_tls and __dfsan_retval_tls.
    IA_TLS
  };

  /// How should calls to uninstrumented functions be handled?
  enum WrapperKind {
    /// This function is present in an uninstrumented form but we don't know
    /// how it should be handled. Print a warning and call the function anyway.
    /// Don't label the return value.
    WK_Warning,

    /// This function does not write to (user-accessible) memory, and its return
    /// value is unlabelled.
    WK_Discard,

    /// This function does not write to (user-accessible) memory, and the label
    /// of its return value is the union of the label of its arguments.
    WK_Functional,

    /// Instead of calling the function, a custom wrapper __dfsw_F is called,
    /// where F is the name of the function. This function may wrap the
    /// original function or provide its own implementation. This is similar to
    /// the IA_Args ABI, except that IA_Args uses a struct return type to
    /// pass the return value shadow in a register, while WK_Custom uses an
    /// extra pointer argument to return the shadow.
    /// This allows the wrapped form of the function type to be expressed in C.
    WK_Custom
  };

  Module *Mod;
  LLVMContext *Ctx;
  Type *Int8Ptr;
  /// The shadow type for all primitive types (an integer of ShadowWidthBits).
  IntegerType *ShadowTy;
  PointerType *ShadowPtrTy;
  IntegerType *IntptrTy;
  ConstantInt *ZeroShadow;
  /// Mask applied to an application address to derive its shadow address
  /// (set in init() for x86_64/MIPS64; resolved at runtime on AArch64).
  ConstantInt *ShadowPtrMask;
  ConstantInt *ShadowPtrMul;
  // TLS globals used by the IA_TLS ABI to pass argument/return labels.
  Constant *ArgTLS;
  Constant *RetvalTLS;
  FunctionType *GetArgTLSTy;
  FunctionType *GetRetvalTLSTy;
  Constant *GetArgTLS;
  Constant *GetRetvalTLS;
  /// Global holding the runtime-provided shadow mask (multi-VMA targets).
  Constant *ExternalShadowMask;
  // Types and callees for the DFSan runtime entry points declared in
  // initializeRuntimeFunctions()/initializeCallbackFunctions().
  FunctionType *DFSanUnionFnTy;
  FunctionType *DFSanUnionLoadFnTy;
  FunctionType *DFSanUnimplementedFnTy;
  FunctionType *DFSanSetLabelFnTy;
  FunctionType *DFSanNonzeroLabelFnTy;
  FunctionType *DFSanVarargWrapperFnTy;
  FunctionType *DFSanCmpCallbackFnTy;
  FunctionType *DFSanLoadStoreCallbackFnTy;
  FunctionType *DFSanMemTransferCallbackFnTy;
  FunctionCallee DFSanUnionFn;
  FunctionCallee DFSanCheckedUnionFn;
  FunctionCallee DFSanUnionLoadFn;
  FunctionCallee DFSanUnionLoadFast16LabelsFn;
  FunctionCallee DFSanUnimplementedFn;
  FunctionCallee DFSanSetLabelFn;
  FunctionCallee DFSanNonzeroLabelFn;
  FunctionCallee DFSanVarargWrapperFn;
  FunctionCallee DFSanLoadCallbackFn;
  FunctionCallee DFSanStoreCallbackFn;
  FunctionCallee DFSanMemTransferCallbackFn;
  FunctionCallee DFSanCmpCallbackFn;
  MDNode *ColdCallWeights;
  DFSanABIList ABIList;
  /// Maps a 'dfs$'-prefixed wrapper back to the original function.
  DenseMap<Value *, Function *> UnwrappedFnMap;
  AttrBuilder ReadOnlyNoneAttrs;
  /// True when the shadow mask must be loaded from the runtime (AArch64).
  bool DFSanRuntimeShadowMask = false;

  Value *getShadowAddress(Value *Addr, Instruction *Pos);
  bool isInstrumented(const Function *F);
  bool isInstrumented(const GlobalAlias *GA);
  FunctionType *getArgsFunctionType(FunctionType *T);
  FunctionType *getTrampolineFunctionType(FunctionType *T);
  TransformedFunction getCustomFunctionType(FunctionType *T);
  InstrumentedABI getInstrumentedABI();
  WrapperKind getWrapperKind(Function *F);
  void addGlobalNamePrefix(GlobalValue *GV);
  Function *buildWrapperFunction(Function *F, StringRef NewFName,
                                 GlobalValue::LinkageTypes NewFLink,
                                 FunctionType *NewFT);
  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
  void initializeCallbackFunctions(Module &M);
  void initializeRuntimeFunctions(Module &M);

  // Populates the module-wide state above; returns true on success.
  bool init(Module &M);

public:
  DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);

  bool runImpl(Module &M);
};

/// Per-function instrumentation state, consumed by DFSanVisitor.
struct DFSanFunction {
  DataFlowSanitizer &DFS;
  Function *F;
  DominatorTree DT;
  DataFlowSanitizer::InstrumentedABI IA;
  /// True when F keeps the native (uninstrumented) ABI.
  bool IsNativeABI;
  Value *ArgTLSPtr = nullptr;
  Value *RetvalTLSPtr = nullptr;
  AllocaInst *LabelReturnAlloca = nullptr;
  /// Maps an IR value to the value holding its shadow label.
  DenseMap<Value *, Value *> ValShadowMap;
  /// Maps an alloca to the alloca holding its shadow.
  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
  /// Shadow PHIs whose incoming values are filled in after the visit pass.
  std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
  DenseSet<Instruction *> SkipInsts;
  std::vector<Value *> NonZeroChecks;
  /// When true (very large functions), avoid creating new basic blocks.
  bool AvoidNewBlocks;

  struct CachedCombinedShadow {
    BasicBlock *Block;
    Value *Shadow;
  };
  // Cache of shadow unions already computed for a pair of values.
  DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
      CachedCombinedShadows;
  DenseMap<Value *, std::set<Value *>> ShadowElements;

  DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
    DT.recalculate(*F);
    // FIXME: Need to track down the register allocator issue which causes poor
    // performance in pathological cases with large numbers of basic blocks.
    AvoidNewBlocks = F->size() > 1000;
  }

  Value *getArgTLSPtr();
  Value *getArgTLS(unsigned Index, Instruction *Pos);
  Value *getRetvalTLS();
  Value *getShadow(Value *V);
  void setShadow(Instruction *I, Value *Shadow);
  Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  Value *combineOperandShadows(Instruction *Inst);
  Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
                    Instruction *Pos);
  void storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow,
                   Instruction *Pos);
};

/// Instruction visitor that inserts the label-propagation instrumentation
/// for a single function, using the state held in DFSanFunction.
class DFSanVisitor : public InstVisitor<DFSanVisitor> {
public:
  DFSanFunction &DFSF;

  DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}

  const DataLayout &getDataLayout() const {
    return DFSF.F->getParent()->getDataLayout();
  }

  // Combines shadow values for all of I's operands. Returns the combined shadow
  // value.
  Value *visitOperandShadowInst(Instruction &I);

  void visitUnaryOperator(UnaryOperator &UO);
  void visitBinaryOperator(BinaryOperator &BO);
  void visitCastInst(CastInst &CI);
  void visitCmpInst(CmpInst &CI);
  void visitGetElementPtrInst(GetElementPtrInst &GEPI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitReturnInst(ReturnInst &RI);
  void visitCallBase(CallBase &CB);
  void visitPHINode(PHINode &PN);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitShuffleVectorInst(ShuffleVectorInst &I);
  void visitExtractValueInst(ExtractValueInst &I);
  void visitInsertValueInst(InsertValueInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitSelectInst(SelectInst &I);
  void visitMemSetInst(MemSetInst &I);
  void visitMemTransferInst(MemTransferInst &I);
};

} // end anonymous namespace

DataFlowSanitizer::DataFlowSanitizer(
    const std::vector<std::string> &ABIListFiles) {
505 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles)); 506 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(), 507 ClABIListFiles.end()); 508 // FIXME: should we propagate vfs::FileSystem to this constructor? 509 ABIList.set( 510 SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem())); 511 } 512 513 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) { 514 SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end()); 515 ArgTypes.append(T->getNumParams(), ShadowTy); 516 if (T->isVarArg()) 517 ArgTypes.push_back(ShadowPtrTy); 518 Type *RetType = T->getReturnType(); 519 if (!RetType->isVoidTy()) 520 RetType = StructType::get(RetType, ShadowTy); 521 return FunctionType::get(RetType, ArgTypes, T->isVarArg()); 522 } 523 524 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) { 525 assert(!T->isVarArg()); 526 SmallVector<Type *, 4> ArgTypes; 527 ArgTypes.push_back(T->getPointerTo()); 528 ArgTypes.append(T->param_begin(), T->param_end()); 529 ArgTypes.append(T->getNumParams(), ShadowTy); 530 Type *RetType = T->getReturnType(); 531 if (!RetType->isVoidTy()) 532 ArgTypes.push_back(ShadowPtrTy); 533 return FunctionType::get(T->getReturnType(), ArgTypes, false); 534 } 535 536 TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) { 537 SmallVector<Type *, 4> ArgTypes; 538 539 // Some parameters of the custom function being constructed are 540 // parameters of T. Record the mapping from parameters of T to 541 // parameters of the custom function, so that parameter attributes 542 // at call sites can be updated. 
  std::vector<unsigned> ArgumentIndexMapping;
  for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {
    Type *param_type = T->getParamType(i);
    FunctionType *FT;
    // A function-pointer parameter becomes a pair: a pointer to a trampoline
    // of the pointee type plus an opaque i8* context argument.
    if (isa<PointerType>(param_type) &&
        (FT = dyn_cast<FunctionType>(
             cast<PointerType>(param_type)->getElementType()))) {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
      ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
    } else {
      ArgumentIndexMapping.push_back(ArgTypes.size());
      ArgTypes.push_back(param_type);
    }
  }
  // One shadow argument per original parameter, then the vararg shadow
  // pointer, then (for non-void functions) the return-shadow out-pointer.
  for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
    ArgTypes.push_back(ShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(ShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(ShadowPtrTy);
  return TransformedFunction(
      T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
      ArgumentIndexMapping);
}

// Sets up the module-level state (types, constants, shadow mask) for the
// target triple. Only x86_64, MIPS64 and AArch64 are supported.
bool DataFlowSanitizer::init(Module &M) {
  Triple TargetTriple(M.getTargetTriple());
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;

  const DataLayout &DL = M.getDataLayout();

  Mod = &M;
  Ctx = &M.getContext();
  Int8Ptr = Type::getInt8PtrTy(*Ctx);
  ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
  ShadowPtrTy = PointerType::getUnqual(ShadowTy);
  IntptrTy = DL.getIntPtrType(*Ctx);
  ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
  // Constant shadow mask per target (see the memory-layout diagram at the
  // top of this file for the x86_64 values).
  if (IsX86_64)
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
  else if (IsMIPS64)
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
  // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
  else if (IsAArch64)
    DFSanRuntimeShadowMask = true;
  else
    report_fatal_error("unsupported triple");

  // Declare the types of the runtime entry points; the callees themselves are
  // resolved in initializeRuntimeFunctions()/initializeCallbackFunctions().
  Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
  DFSanUnionFnTy =
      FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
  Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
  DFSanUnionLoadFnTy =
      FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
  DFSanUnimplementedFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
  DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
                                        DFSanSetLabelArgs, /*isVarArg=*/false);
  DFSanNonzeroLabelFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
  DFSanVarargWrapperFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  DFSanCmpCallbackFnTy = FunctionType::get(Type::getVoidTy(*Ctx), ShadowTy,
                                           /*isVarArg=*/false);
  Type *DFSanLoadStoreCallbackArgs[2] = {ShadowTy, Int8Ptr};
  DFSanLoadStoreCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
                        /*isVarArg=*/false);
  Type *DFSanMemTransferCallbackArgs[2] = {ShadowPtrTy, IntptrTy};
  DFSanMemTransferCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
                        /*isVarArg=*/false);

  // Branch weights marking runtime slow paths as cold.
  ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
  return true;
}

// A function/alias is instrumented unless the ABI list marks it
// "uninstrumented".
bool DataFlowSanitizer::isInstrumented(const Function *F) {
  return !ABIList.isIn(*F, "uninstrumented");
}

bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
  return !ABIList.isIn(*GA, "uninstrumented");
}

DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
  return ClArgsABI ?
    IA_Args : IA_TLS;
}

// Maps an ABI-list category to the handling strategy for an uninstrumented
// function; anything unannotated falls back to WK_Warning.
DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
  if (ABIList.isIn(*F, "functional"))
    return WK_Functional;
  if (ABIList.isIn(*F, "discard"))
    return WK_Discard;
  if (ABIList.isIn(*F, "custom"))
    return WK_Custom;

  return WK_Warning;
}

// Renames GV to "dfs$<name>" to mark it as using the instrumented ABI.
void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
  std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
  GV->setName(Prefix + GVName);

  // Try to change the name of the function in module inline asm.  We only do
  // this for specific asm directives, currently only ".symver", to try to avoid
  // corrupting asm which happens to contain the symbol name as a substring.
  // Note that the substitution for .symver assumes that the versioned symbol
  // also has an instrumented name.
  std::string Asm = GV->getParent()->getModuleInlineAsm();
  std::string SearchStr = ".symver " + GVName + ",";
  size_t Pos = Asm.find(SearchStr);
  if (Pos != std::string::npos) {
    Asm.replace(Pos, SearchStr.size(),
                ".symver " + Prefix + GVName + "," + Prefix);
    GV->getParent()->setModuleInlineAsm(Asm);
  }
}

// Creates a function of type NewFT whose body simply forwards to F.
// Varargs functions cannot be forwarded; they get a call to
// __dfsan_vararg_wrapper (which reports the problem) followed by unreachable.
Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
                                        GlobalValue::LinkageTypes NewFLink,
                                        FunctionType *NewFT) {
  FunctionType *FT = F->getFunctionType();
  Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
                                    NewFName, F->getParent());
  NewF->copyAttributesFrom(F);
  NewF->removeAttributes(
      AttributeList::ReturnIndex,
      AttributeFuncs::typeIncompatible(NewFT->getReturnType()));

  BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
  if (F->isVarArg()) {
    NewF->removeAttributes(AttributeList::FunctionIndex,
                           AttrBuilder().addAttribute("split-stack"));
    CallInst::Create(DFSanVarargWrapperFn,
                     IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
                     BB);
    new
        UnreachableInst(*Ctx, BB);
  } else {
    // Forward all fixed parameters to the wrapped function and return its
    // result unchanged.
    std::vector<Value *> Args;
    unsigned n = FT->getNumParams();
    for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
      Args.push_back(&*ai);
    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}

// Returns (creating on first use) the trampoline FName for function type FT.
// The trampoline calls its first argument (the real callee), propagating the
// shadow arguments via the native-ABI DFSanFunction machinery and writing the
// return shadow through the trailing out-pointer.
Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
                                                          StringRef FName) {
  FunctionType *FTT = getTrampolineFunctionType(FT);
  FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
  Function *F = dyn_cast<Function>(C.getCallee());
  if (F && F->isDeclaration()) {
    F->setLinkage(GlobalValue::LinkOnceODRLinkage);
    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    std::vector<Value *> Args;
    // Skip the first argument (the target function pointer).
    Function::arg_iterator AI = F->arg_begin(); ++AI;
    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
      Args.push_back(&*AI);
    CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
    ReturnInst *RI;
    if (FT->getReturnType()->isVoidTy())
      RI = ReturnInst::Create(*Ctx, BB);
    else
      RI = ReturnInst::Create(*Ctx, CI, BB);

    // Seed the shadow map with the trampoline's shadow parameters, then let
    // the visitor instrument the inner call.
    DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
      DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
    DFSanVisitor(DFSF).visitCallInst(*CI);
    if (!FT->getReturnType()->isVoidTy())
      new StoreInst(DFSF.getShadow(RI->getReturnValue()),
                    &*std::prev(F->arg_end()), RI);
  }

  return cast<Constant>(C.getCallee());
}

// Initialize DataFlowSanitizer runtime functions and declare them in the module
void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanUnionFn =
        Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    // Same signature as __dfsan_union; the "checked" variant validates its
    // label arguments.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadNone);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanCheckedUnionFn =
        Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
  }
  {
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFn =
        Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
  }
  {
    // Variant of __dfsan_union_load used under -dfsan-fast-16-labels.
    AttributeList AL;
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::NoUnwind);
    AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                         Attribute::ReadOnly);
    AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
                         Attribute::ZExt);
    DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction(
        "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  {
    AttributeList AL;
    AL =
        AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanSetLabelFn =
        Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);
}

// Initializes event callback functions and declare them in the module
// (used only when -dfsan-event-callbacks is enabled; definitions are
// user-provided, see the ClEventCallbacks comment above).
void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
  DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
                                                 DFSanLoadStoreCallbackFnTy);
  DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback",
                                                  DFSanLoadStoreCallbackFnTy);
  DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
      "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
  DFSanCmpCallbackFn =
      Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
}

// Entry point of the pass: instruments every eligible function in M.
// Returns true if the module was changed.
bool DataFlowSanitizer::runImpl(Module &M) {
  init(M);

  // Modules listed as "skip" in the ABI list are left untouched.
  if (ABIList.isIn(M, "skip"))
    return false;

  const unsigned InitialGlobalSize = M.global_size();
  const unsigned InitialModuleSize = M.size();

  bool Changed = false;

  // Create (or adopt) the TLS globals used by the IA_TLS ABI.
  Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
  ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
  if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS)) {
    Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
    G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }
  RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
  if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS)) {
    Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
    G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }

  ExternalShadowMask =
      Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);

  initializeCallbackFunctions(M);
  initializeRuntimeFunctions(M);

  // Collect every function we intend to instrument, excluding intrinsics and
  // the DFSan runtime functions declared above (comparing against
  // stripPointerCasts() since getOrInsertFunction may have returned a
  // bitcasted constant).
  std::vector<Function *> FnsToInstrument;
  SmallPtrSet<Function *, 2> FnsWithNativeABI;
  for (Function &i : M) {
    if (!i.isIntrinsic() &&
        &i != DFSanUnionFn.getCallee()->stripPointerCasts() &&
        &i != DFSanCheckedUnionFn.getCallee()->stripPointerCasts() &&
        &i != DFSanUnionLoadFn.getCallee()->stripPointerCasts() &&
        &i != DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts() &&
        &i != DFSanUnimplementedFn.getCallee()->stripPointerCasts() &&
        &i != DFSanSetLabelFn.getCallee()->stripPointerCasts() &&
        &i != DFSanNonzeroLabelFn.getCallee()->stripPointerCasts() &&
        &i != DFSanVarargWrapperFn.getCallee()->stripPointerCasts() &&
        &i != DFSanLoadCallbackFn.getCallee()->stripPointerCasts() &&
        &i != DFSanStoreCallbackFn.getCallee()->stripPointerCasts() &&
        &i != DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts() &&
        &i != DFSanCmpCallbackFn.getCallee()->stripPointerCasts())
      FnsToInstrument.push_back(&i);
  }

  // Give function aliases prefixes when necessary, and build wrappers where the
  // instrumentedness is inconsistent.
  for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
    GlobalAlias *GA = &*i;
    ++i;
    // Don't stop on weak.  We assume people aren't playing games with the
    // instrumentedness of overridden weak aliases.
    if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
      bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
      if (GAInst && FInst) {
        addGlobalNamePrefix(GA);
      } else if (GAInst != FInst) {
        // Non-instrumented alias of an instrumented function, or vice versa.
        // Replace the alias with a native-ABI wrapper of the aliasee.  The pass
        // below will take care of instrumenting it.
        Function *NewF =
            buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
        GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
        NewF->takeName(GA);
        GA->eraseFromParent();
        FnsToInstrument.push_back(NewF);
      }
    }
  }

  ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
      .addAttribute(Attribute::ReadNone);

  // First, change the ABI of every function in the module.  ABI-listed
  // functions keep their original ABI and get a wrapper function.
  for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
                                         e = FnsToInstrument.end();
       i != e; ++i) {
    Function &F = **i;
    FunctionType *FT = F.getFunctionType();

    bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
                              FT->getReturnType()->isVoidTy());

    if (isInstrumented(&F)) {
      // Instrumented functions get a 'dfs$' prefix.  This allows us to more
      // easily identify cases of mismatching ABIs.
      if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
        // IA_Args mode: replace F with a clone whose signature carries extra
        // shadow parameters (see getArgsFunctionType).
        FunctionType *NewFT = getArgsFunctionType(FT);
        Function *NewF = Function::Create(NewFT, F.getLinkage(),
                                          F.getAddressSpace(), "", &M);
        NewF->copyAttributesFrom(&F);
        NewF->removeAttributes(
            AttributeList::ReturnIndex,
            AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
        for (Function::arg_iterator FArg = F.arg_begin(),
                                    NewFArg = NewF->arg_begin(),
                                    FArgEnd = F.arg_end();
             FArg != FArgEnd; ++FArg, ++NewFArg) {
          FArg->replaceAllUsesWith(&*NewFArg);
        }
        NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());

        // Patch up any block addresses taken of F's blocks; the blocks now
        // live in NewF.
        for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
             UI != UE;) {
          BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
          ++UI;
          if (BA) {
            BA->replaceAllUsesWith(
                BlockAddress::get(NewF, BA->getBasicBlock()));
            delete BA;
          }
        }
        F.replaceAllUsesWith(
            ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
        NewF->takeName(&F);
        F.eraseFromParent();
        *i = NewF;
        addGlobalNamePrefix(NewF);
      } else {
        addGlobalNamePrefix(&F);
      }
    } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
      // Build a wrapper function for F.  The wrapper simply calls F, and is
      // added to FnsToInstrument so that any instrumentation according to its
      // WrapperKind is done in the second pass below.
      FunctionType *NewFT = getInstrumentedABI() == IA_Args
                                ? getArgsFunctionType(FT)
                                : FT;

      // If the function being wrapped has local linkage, then preserve the
      // function's linkage in the wrapper function.
      GlobalValue::LinkageTypes wrapperLinkage =
          F.hasLocalLinkage()
              ? F.getLinkage()
              : GlobalValue::LinkOnceODRLinkage;

      Function *NewF = buildWrapperFunction(
          &F, std::string("dfsw$") + std::string(F.getName()),
          wrapperLinkage, NewFT);
      if (getInstrumentedABI() == IA_TLS)
        NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);

      Value *WrappedFnCst =
          ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
      F.replaceAllUsesWith(WrappedFnCst);

      UnwrappedFnMap[WrappedFnCst] = &F;
      *i = NewF;

      if (!F.isDeclaration()) {
        // This function is probably defining an interposition of an
        // uninstrumented function and hence needs to keep the original ABI.
        // But any functions it may call need to use the instrumented ABI, so
        // we instrument it in a mode which preserves the original ABI.
        FnsWithNativeABI.insert(&F);

        // This code needs to rebuild the iterators, as they may be invalidated
        // by the push_back, taking care that the new range does not include
        // any functions added by this code.
        size_t N = i - FnsToInstrument.begin(),
               Count = e - FnsToInstrument.begin();
        FnsToInstrument.push_back(&F);
        i = FnsToInstrument.begin() + N;
        e = FnsToInstrument.begin() + Count;
      }
      // Hopefully, nobody will try to indirectly call a vararg
      // function... yet.
    } else if (FT->isVarArg()) {
      UnwrappedFnMap[&F] = &F;
      *i = nullptr;
    }
  }

  // Second pass: instrument the bodies of the functions collected above.
  for (Function *i : FnsToInstrument) {
    if (!i || i->isDeclaration())
      continue;

    removeUnreachableBlocks(*i);

    DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));

    // DFSanVisitor may create new basic blocks, which confuses df_iterator.
    // Build a copy of the list before iterating over it.
    SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));

    for (BasicBlock *i : BBList) {
      Instruction *Inst = &i->front();
      while (true) {
        // DFSanVisitor may split the current basic block, changing the current
        // instruction's next pointer and moving the next instruction to the
        // tail block from which we should continue.
        Instruction *Next = Inst->getNextNode();
        // DFSanVisitor may delete Inst, so keep track of whether it was a
        // terminator.
        bool IsTerminator = Inst->isTerminator();
        if (!DFSF.SkipInsts.count(Inst))
          DFSanVisitor(DFSF).visit(Inst);
        if (IsTerminator)
          break;
        Inst = Next;
      }
    }

    // We will not necessarily be able to compute the shadow for every phi node
    // until we have visited every block.  Therefore, the code that handles phi
    // nodes adds them to the PHIFixups list so that they can be properly
    // handled here.
    for (std::vector<std::pair<PHINode *, PHINode *>>::iterator
             i = DFSF.PHIFixups.begin(),
             e = DFSF.PHIFixups.end();
         i != e; ++i) {
      for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
           ++val) {
        i->second->setIncomingValue(
            val, DFSF.getShadow(i->first->getIncomingValue(val)));
      }
    }

    // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
    // places (i.e. instructions in basic blocks we haven't even begun visiting
    // yet).  To make our life easier, do this work in a pass after the main
    // instrumentation.
    if (ClDebugNonzeroLabels) {
      for (Value *V : DFSF.NonZeroChecks) {
        // Insert the check right after the value's definition (or at the top
        // of the entry block for arguments), skipping phis and allocas which
        // must stay at the block head.
        Instruction *Pos;
        if (Instruction *I = dyn_cast<Instruction>(V))
          Pos = I->getNextNode();
        else
          Pos = &DFSF.F->getEntryBlock().front();
        while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
          Pos = Pos->getNextNode();
        IRBuilder<> IRB(Pos);
        Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
        BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
            Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
        IRBuilder<> ThenIRB(BI);
        ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
      }
    }
  }

  // Report a change if we instrumented anything or added globals/functions.
  return Changed || !FnsToInstrument.empty() ||
         M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
}

// Returns (caching in ArgTLSPtr) the base pointer of the argument shadow TLS
// array, either the __dfsan_arg_tls global or, failing that, the result of a
// runtime getter call inserted in the entry block.
Value *DFSanFunction::getArgTLSPtr() {
  if (ArgTLSPtr)
    return ArgTLSPtr;
  if (DFS.ArgTLS)
    return ArgTLSPtr = DFS.ArgTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLSTy, DFS.GetArgTLS, {});
}

// Returns (caching in RetvalTLSPtr) the return-value shadow TLS slot,
// mirroring the logic of getArgTLSPtr above.
Value *DFSanFunction::getRetvalTLS() {
  if (RetvalTLSPtr)
    return RetvalTLSPtr;
  if (DFS.RetvalTLS)
    return RetvalTLSPtr = DFS.RetvalTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return RetvalTLSPtr =
             IRB.CreateCall(DFS.GetRetvalTLSTy, DFS.GetRetvalTLS, {});
}
// Returns a pointer to the Idx'th slot of the 64-entry argument shadow TLS
// array, via a GEP inserted before Pos.
Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
  IRBuilder<> IRB(Pos);
  return IRB.CreateConstGEP2_64(ArrayType::get(DFS.ShadowTy, 64),
                                getArgTLSPtr(), 0, Idx);
}

// Returns the shadow value for V, materializing and caching it on first use.
// Only arguments and instructions can carry a non-zero shadow; everything
// else (constants, globals) maps to ZeroShadow.
Value *DFSanFunction::getShadow(Value *V) {
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.ZeroShadow;
  Value *&Shadow = ValShadowMap[V];
  if (!Shadow) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      // Native-ABI functions receive no shadow for their arguments.
      if (IsNativeABI)
        return DFS.ZeroShadow;
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        // Load the argument shadow from the TLS array.  If the TLS global
        // exists, load at the top of the entry block; otherwise load right
        // after the runtime getter call that produced ArgTLSPtr.
        Value *ArgTLSPtr = getArgTLSPtr();
        Instruction *ArgTLSPos =
            DFS.ArgTLS ? &*F->getEntryBlock().begin()
                       : cast<Instruction>(ArgTLSPtr)->getNextNode();
        IRBuilder<> IRB(ArgTLSPos);
        Shadow =
            IRB.CreateLoad(DFS.ShadowTy, getArgTLS(A->getArgNo(), ArgTLSPos));
        break;
      }
      case DataFlowSanitizer::IA_Args: {
        // In args mode the shadow is an explicit trailing parameter: the
        // second half of the argument list mirrors the first.
        unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
        Function::arg_iterator i = F->arg_begin();
        while (ArgIdx--)
          ++i;
        Shadow = &*i;
        assert(Shadow->getType() == DFS.ShadowTy);
        break;
      }
      }
      NonZeroChecks.push_back(Shadow);
    } else {
      Shadow = DFS.ZeroShadow;
    }
  }
  return Shadow;
}

// Records Shadow as the shadow value of instruction I.  Each instruction may
// only be assigned a shadow once.
void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
  assert(!ValShadowMap.count(I));
  assert(Shadow->getType() == DFS.ShadowTy);
  ValShadowMap[I] = Shadow;
}

// Emits IR before Pos computing the shadow address for application address
// Addr: (Addr & mask) * ShadowPtrMul, per the memory layout described at the
// top of this file.  The mask comes from a runtime global when
// -dfsan-runtime-shadow-mask is set.
Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
  assert(Addr != RetvalTLS && "Reinstrumenting?");
  IRBuilder<> IRB(Pos);
  Value *ShadowPtrMaskValue;
  if (DFSanRuntimeShadowMask)
    ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
  else
    ShadowPtrMaskValue = ShadowPtrMask;
  return IRB.CreateIntToPtr(
      IRB.CreateMul(
          IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
          ShadowPtrMul),
      ShadowPtrTy);
}

// Generates IR to compute the union of the two given shadows, inserting it
// before Pos.  Returns the computed union Value.
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
  // Trivial cases: union with zero or with itself is a no-op.
  if (V1 == DFS.ZeroShadow)
    return V2;
  if (V2 == DFS.ZeroShadow)
    return V1;
  if (V1 == V2)
    return V1;

  // If one shadow's known element set subsumes the other's, the union is
  // already represented by the larger shadow.
  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return V1;
    } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                             V1Elems->second.begin(), V1Elems->second.end())) {
      return V2;
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return V1;
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return V2;
  }

  // Canonicalize the pair ordering so (V1,V2) and (V2,V1) share a cache
  // entry; reuse a previously computed union if it dominates Pos.
  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  IRBuilder<> IRB(Pos);
  if (ClFast16Labels) {
    // Fast16 mode: labels are bit sets, so union is a plain OR.
    CCS.Block = Pos->getParent();
    CCS.Shadow = IRB.CreateOr(V1, V2);
  } else if (AvoidNewBlocks) {
    // Call the runtime union function that itself checks for equal labels.
    CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    CCS.Block = Pos->getParent();
    CCS.Shadow = Call;
  } else {
    // Inline fast path: only call __dfsan_union when the labels differ.
    BasicBlock *Head = Pos->getParent();
    Value *Ne = IRB.CreateICmpNE(V1, V2);
    BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
        Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
    IRBuilder<> ThenIRB(BI);
    CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    // Merge the call result with V1 (equal to V2 on the fall-through edge).
    BasicBlock *Tail = BI->getSuccessor(0);
    PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Phi->addIncoming(Call, Call->getParent());
    Phi->addIncoming(V1, Head);

    CCS.Block = Tail;
    CCS.Shadow = Phi;
  }

  // Record the element set of the union so later combines can be elided.
  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}

// A convenience function which folds the shadows of each of the operands
// of the provided instruction Inst, inserting the IR before Inst.  Returns
// the computed union Value.
Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
  if (Inst->getNumOperands() == 0)
    return DFS.ZeroShadow;

  Value *Shadow = getShadow(Inst->getOperand(0));
  for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
    Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
  }
  return Shadow;
}

// Default handler: the instruction's shadow is the union of its operands'
// shadows.
Value *DFSanVisitor::visitOperandShadowInst(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
  return CombinedShadow;
}

// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
// Addr has alignment Align, and take the union of each of those shadows.
Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
                                 Instruction *Pos) {
  // Allocas tracked by visitAllocaInst keep a single shadow cell; load it
  // directly instead of going through shadow memory.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto i = AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      return IRB.CreateLoad(DFS.ShadowTy, i->second);
    }
  }

  const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
  // If every underlying object is a constant (function, block address, or
  // constant global), the data can never carry a label.
  SmallVector<const Value *, 2> Objs;
  getUnderlyingObjects(Addr, Objs);
  bool AllConstants = true;
  for (const Value *Obj : Objs) {
    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
      continue;
    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
      continue;

    AllConstants = false;
    break;
  }
  if (AllConstants)
    return DFS.ZeroShadow;

  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  // Small loads are handled inline; note the fall-through below for sizes
  // not covered by the switch.
  switch (Size) {
  case 0:
    return DFS.ZeroShadow;
  case 1: {
    LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
    LI->setAlignment(ShadowAlign);
    return LI;
  }
  case 2: {
    IRBuilder<> IRB(Pos);
    Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
                                       ConstantInt::get(DFS.IntptrTy, 1));
    return combineShadows(
        IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr, ShadowAlign),
        IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
  }
  }

  if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0) {
    // First OR all the WideShadows, then OR individual shadows within the
    // combined WideShadow.  This is fewer instructions than ORing shadows
    // individually.
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *CombinedWideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidthBits) {
      WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                               ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow =
          IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
      CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
    }
    // Fold the 64-bit OR-accumulator down to a single shadow-width value by
    // repeatedly ORing the high half onto the low half.
    for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
      Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
      CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
    }
    return IRB.CreateTrunc(CombinedWideShadow, DFS.ShadowTy);
  }
  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) {
    // Fast path for the common case where each byte has identical shadow: load
    // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
    // shadow is non-equal.
    BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
    IRBuilder<> FallbackIRB(FallbackBB);
    CallInst *FallbackCall = FallbackIRB.CreateCall(
        DFS.DFSanUnionLoadFn,
        {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
    FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);

    // Compare each of the shadows stored in the loaded 64 bits to each other,
    // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *WideShadow =
        IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
    Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
    Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
    Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
    Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
    Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);

    BasicBlock *Head = Pos->getParent();
    BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());

    // Keep the dominator tree consistent with the manual CFG edits below.
    if (DomTreeNode *OldNode = DT.getNode(Head)) {
      std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());

      DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
      for (auto Child : Children)
        DT.changeImmediateDominator(Child, NewNode);
    }

    // In the following code LastBr will refer to the previous basic block's
    // conditional branch instruction, whose true successor is fixed up to point
    // to the next block during the loop below or to the tail after the final
    // iteration.
    BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
    ReplaceInstWithInst(Head->getTerminator(), LastBr);
    DT.addNewBlock(FallbackBB, Head);

    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidthBits) {
      BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
      DT.addNewBlock(NextBB, LastBr->getParent());
      IRBuilder<> NextIRB(NextBB);
      WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                                   ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow = NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(),
                                                        WideAddr, ShadowAlign);
      ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
      LastBr->setSuccessor(0, NextBB);
      // Both successors temporarily point at FallbackBB; successor 0 is
      // retargeted on the next iteration or after the loop.
      LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
    }

    LastBr->setSuccessor(0, Tail);
    FallbackIRB.CreateBr(Tail);
    PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Shadow->addIncoming(FallbackCall, FallbackBB);
    Shadow->addIncoming(TruncShadow, LastBr->getParent());
    return Shadow;
  }

  // General fallback: let the runtime compute the union of the loaded range.
  IRBuilder<> IRB(Pos);
  FunctionCallee &UnionLoadFn =
      ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
  CallInst *FallbackCall = IRB.CreateCall(
      UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  return FallbackCall;
}

// Propagates shadow through a load: the result's shadow is the union of the
// loaded bytes' shadows (plus, optionally, the pointer's shadow).
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
  auto &DL = LI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(LI.getType());
  if (Size == 0) {
    DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
    return;
  }

  Align Alignment = ClPreserveAlignment ?
                        LI.getAlign() : Align(1);
  Value *Shadow =
      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
  if (ClCombinePointerLabelsOnLoad) {
    Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
  }
  if (Shadow != DFSF.DFS.ZeroShadow)
    DFSF.NonZeroChecks.push_back(Shadow);

  DFSF.setShadow(&LI, Shadow);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&LI);
    Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
    IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {Shadow, Addr8});
  }
}

// Generates IR to store Shadow into the shadow bytes backing
// [Addr, Addr+Size), inserted before Pos.
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
                                Value *Shadow, Instruction *Pos) {
  // Allocas tracked by visitAllocaInst keep a single shadow cell; store to it
  // directly instead of going through shadow memory.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto i = AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      IRB.CreateStore(Shadow, i->second);
      return;
    }
  }

  const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
  IRBuilder<> IRB(Pos);
  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  if (Shadow == DFS.ZeroShadow) {
    // Zeroing: emit one wide store covering the whole shadow range.
    IntegerType *ShadowTy =
        IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
    Value *ExtShadowAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
    return;
  }

  // Write the shadow 128 bits at a time by splatting it into a vector, then
  // finish the remainder with scalar stores.
  const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
  uint64_t Offset = 0;
  if (Size >= ShadowVecSize) {
    auto *ShadowVecTy = FixedVectorType::get(DFS.ShadowTy, ShadowVecSize);
    Value *ShadowVec = UndefValue::get(ShadowVecTy);
    for (unsigned i = 0; i != ShadowVecSize; ++i) {
      ShadowVec = IRB.CreateInsertElement(
          ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
    }
    Value *ShadowVecAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
    do {
      Value *CurShadowVecAddr =
          IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
      IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
      Size -= ShadowVecSize;
      ++Offset;
    } while (Size >= ShadowVecSize);
    // Convert the vector-element offset back into a scalar-element offset
    // for the remainder loop below.
    Offset *= ShadowVecSize;
  }
  while (Size > 0) {
    Value *CurShadowAddr =
        IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
    IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
    --Size;
    ++Offset;
  }
}

// Propagates shadow through a store: the stored value's shadow (optionally
// unioned with the pointer's shadow) is written to shadow memory.
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
  auto &DL = SI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
  if (Size == 0)
    return;

  const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);

  Value* Shadow = DFSF.getShadow(SI.getValueOperand());
  if (ClCombinePointerLabelsOnStore) {
    Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
  }
  DFSF.storeShadow(SI.getPointerOperand(), Size, Alignment, Shadow, &SI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&SI);
    Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
    IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {Shadow, Addr8});
  }
}

// The visitors below all propagate shadow as the union of operand shadows.

void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
  visitOperandShadowInst(UO);
}

void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
  visitOperandShadowInst(BO);
}

void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }

void DFSanVisitor::visitCmpInst(CmpInst &CI) {
  Value *CombinedShadow = visitOperandShadowInst(CI);
  if (ClEventCallbacks) {
    IRBuilder<> IRB(&CI);
    IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
  }
}

void
DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  visitOperandShadowInst(GEPI);
}

void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
  visitOperandShadowInst(I);
}

// For allocas used only as plain load/store targets, track their shadow in a
// dedicated shadow alloca instead of shadow memory.  The alloca's own pointer
// value carries zero shadow.
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
  bool AllLoadsStores = true;
  for (User *U : I.users()) {
    if (isa<LoadInst>(U))
      continue;

    // A store is only a plain use if the alloca is the address, not the
    // value being stored.
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getPointerOperand() == &I)
        continue;
    }

    AllLoadsStores = false;
    break;
  }
  if (AllLoadsStores) {
    IRBuilder<> IRB(&I);
    DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
  }
  DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
}

// The shadow of a select mirrors the select itself: pick the matching
// operand's shadow, optionally unioned with the condition's shadow when
// -dfsan-track-select-control-flow is set.
void DFSanVisitor::visitSelectInst(SelectInst &I) {
  Value *CondShadow = DFSF.getShadow(I.getCondition());
  Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
  Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
  Value *ShadowSel = nullptr;

  if (isa<VectorType>(I.getCondition()->getType())) {
    // Vector selects pick per-lane, so conservatively union both sides.
    ShadowSel = DFSF.combineShadows(TrueShadow, FalseShadow, &I);
  } else {
    if (TrueShadow == FalseShadow) {
      ShadowSel = TrueShadow;
    } else {
      ShadowSel =
          SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
    }
  }
  DFSF.setShadow(&I, ClTrackSelectControlFlow
                         ? DFSF.combineShadows(CondShadow, ShadowSel, &I)
                         : ShadowSel);
}

// memset: delegate to __dfsan_set_label so the runtime labels the filled
// bytes with the shadow of the fill value.
void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
  IRBuilder<> IRB(&I);
  Value *ValShadow = DFSF.getShadow(I.getValue());
  IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
                 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
                                                                *DFSF.DFS.Ctx)),
                  IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}

// memcpy/memmove: emit a matching transfer of the shadow bytes, scaling
// length and alignment by the shadow width.
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);
  Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  Value *LenShadow =
      IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
                                                    DFSF.DFS.ShadowWidthBytes));
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  auto *MTI = cast<MemTransferInst>(
      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
  if (ClPreserveAlignment) {
    MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
    MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
  } else {
    MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
    MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
  }
  if (ClEventCallbacks) {
    IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
                   {RawDestShadow, I.getLength()});
  }
}

// Passes the return value's shadow out of the function: via the retval TLS
// slot in IA_TLS mode, or folded into the aggregate return in IA_Args mode.
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      IRB.CreateStore(S, DFSF.getRetvalTLS());
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      IRBuilder<>
          IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      // Wrap {value, shadow} into the aggregate return type.
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}

// Instruments calls and invokes: handles runtime/intrinsic exclusions,
// ABI-listed wrapper kinds (warning/discard/functional/custom), and shadow
// passing for the instrumented ABI.
void DFSanVisitor::visitCallBase(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  // Intrinsics and inline asm get the default operand-union treatment.
  if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
    visitOperandShadowInst(CB);
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
    return;

  IRBuilder<> IRB(&CB);

  DenseMap<Value *, Function *>::iterator i =
      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
  if (i != DFSF.DFS.UnwrappedFnMap.end()) {
    Function *F = i->second;
    switch (DFSF.DFS.getWrapperKind(F)) {
    case DataFlowSanitizer::WK_Warning:
      CB.setCalledFunction(F);
      IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                     IRB.CreateGlobalStringPtr(F->getName()));
      DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Discard:
      CB.setCalledFunction(F);
      DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Functional:
      CB.setCalledFunction(F);
      visitOperandShadowInst(CB);
      return;
    case DataFlowSanitizer::WK_Custom:
      // Don't try to handle invokes of custom functions, it's too complicated.
      // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
      // wrapper.
      if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
        FunctionType *FT = F->getFunctionType();
        TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
        std::string CustomFName = "__dfsw_";
        CustomFName += F->getName();
        FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
            CustomFName, CustomFn.TransformedType);
        if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
          CustomFn->copyAttributesFrom(F);

          // Custom functions returning non-void will write to the return label.
          if (!FT->getReturnType()->isVoidTy()) {
            CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                       DFSF.DFS.ReadOnlyNoneAttrs);
          }
        }

        std::vector<Value *> Args;

        // Pass the original arguments, replacing function-pointer arguments
        // with a trampoline plus an opaque i8* so the custom wrapper can
        // forward shadows through the callback.
        auto i = CB.arg_begin();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
          Type *T = (*i)->getType();
          FunctionType *ParamFT;
          if (isa<PointerType>(T) &&
              (ParamFT = dyn_cast<FunctionType>(
                   cast<PointerType>(T)->getElementType()))) {
            std::string TName = "dfst";
            TName += utostr(FT->getNumParams() - n);
            TName += "$";
            TName += F->getName();
            Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
            Args.push_back(T);
            Args.push_back(
                IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
          } else {
            Args.push_back(*i);
          }
        }

        // Then pass one shadow per fixed parameter.
        i = CB.arg_begin();
        const unsigned ShadowArgStart = Args.size();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
          Args.push_back(DFSF.getShadow(*i));

        // Variadic arguments' shadows are passed via a stack array.
        if (FT->isVarArg()) {
          auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
                                           CB.arg_size() - FT->getNumParams());
          auto *LabelVAAlloca = new AllocaInst(
              LabelVATy, getDataLayout().getAllocaAddrSpace(),
              "labelva", &DFSF.F->getEntryBlock().front());

          for (unsigned n = 0; i != CB.arg_end(); ++i, ++n) {
            auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
            IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
          }

          Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
        }

        // Non-void custom functions receive a pointer through which they
        // report the return value's label.
        if (!FT->getReturnType()->isVoidTy()) {
          if (!DFSF.LabelReturnAlloca) {
            DFSF.LabelReturnAlloca =
                new AllocaInst(DFSF.DFS.ShadowTy,
                               getDataLayout().getAllocaAddrSpace(),
                               "labelreturn", &DFSF.F->getEntryBlock().front());
          }
          Args.push_back(DFSF.LabelReturnAlloca);
        }

        // Finally, forward the variadic arguments themselves.
        for (i = CB.arg_begin() + FT->getNumParams(); i != CB.arg_end(); ++i)
          Args.push_back(*i);

        CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
        CustomCI->setCallingConv(CI->getCallingConv());
        CustomCI->setAttributes(TransformFunctionAttributes(CustomFn,
            CI->getContext(), CI->getAttributes()));

        // Update the parameter attributes of the custom call instruction to
        // zero extend the shadow parameters.  This is required for targets
        // which consider ShadowTy an illegal type.
        for (unsigned n = 0; n < FT->getNumParams(); n++) {
          const unsigned ArgNo = ShadowArgStart + n;
          if (CustomCI->getArgOperand(ArgNo)->getType() == DFSF.DFS.ShadowTy)
            CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
        }

        if (!FT->getReturnType()->isVoidTy()) {
          LoadInst *LabelLoad =
              IRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.LabelReturnAlloca);
          DFSF.setShadow(CustomCI, LabelLoad);
        }

        CI->replaceAllUsesWith(CustomCI);
        CI->eraseFromParent();
        return;
      }
      break;
    }
  }

  FunctionType *FT = CB.getFunctionType();
  // IA_TLS: store each argument's shadow into the TLS array before the call.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
      IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
                      DFSF.getArgTLS(i, &CB));
    }
  }

  // Find the instruction position just after the call where the return
  // shadow can be read back (splitting critical edges for invokes).
  Instruction *Next = nullptr;
  if (!CB.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CB.getIterator() != CB.getParent()->end());
      Next = CB.getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      IRBuilder<> NextIRB(Next);
      LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.getRetvalTLS());
      DFSF.SkipInsts.insert(LI);
      DFSF.setShadow(&CB, LI);
      DFSF.NonZeroChecks.push_back(LI);
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
    std::vector<Value *> Args;

    auto i = CB.arg_begin(), E = CB.arg_end();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(*i);

    i = CB.arg_begin();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(DFSF.getShadow(*i));

    if (FT->isVarArg()) {
      unsigned VarArgSize = CB.arg_size() - FT->getNumParams();
      ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
      for (unsigned n = 0; i != E; ++i, ++n) {
        IRB.CreateStore(
            DFSF.getShadow(*i),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
        Args.push_back(*i);
      }
    }

    CallBase *NewCB;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
1837 NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(), 1838 II->getUnwindDest(), Args); 1839 } else { 1840 NewCB = IRB.CreateCall(NewFT, Func, Args); 1841 } 1842 NewCB->setCallingConv(CB.getCallingConv()); 1843 NewCB->setAttributes(CB.getAttributes().removeAttributes( 1844 *DFSF.DFS.Ctx, AttributeList::ReturnIndex, 1845 AttributeFuncs::typeIncompatible(NewCB->getType()))); 1846 1847 if (Next) { 1848 ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next); 1849 DFSF.SkipInsts.insert(ExVal); 1850 ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next); 1851 DFSF.SkipInsts.insert(ExShadow); 1852 DFSF.setShadow(ExVal, ExShadow); 1853 DFSF.NonZeroChecks.push_back(ExShadow); 1854 1855 CB.replaceAllUsesWith(ExVal); 1856 } 1857 1858 CB.eraseFromParent(); 1859 } 1860 } 1861 1862 void DFSanVisitor::visitPHINode(PHINode &PN) { 1863 PHINode *ShadowPN = 1864 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN); 1865 1866 // Give the shadow phi node valid predecessors to fool SplitEdge into working. 
  Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
  // Placeholder undef incoming values, one per predecessor; replaced with the
  // real shadows during the PHIFixups pass below.
  for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
       ++i) {
    ShadowPN->addIncoming(UndefShadow, *i);
  }

  DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
  DFSF.setShadow(&PN, ShadowPN);
}

namespace {
// Legacy pass-manager adapter: forwards the ABI list files to the
// DataFlowSanitizer implementation and runs it over the module.
class DataFlowSanitizerLegacyPass : public ModulePass {
private:
  std::vector<std::string> ABIListFiles;

public:
  static char ID;

  DataFlowSanitizerLegacyPass(
      const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
      : ModulePass(ID), ABIListFiles(ABIListFiles) {}

  // Returns true iff the module was modified.
  bool runOnModule(Module &M) override {
    return DataFlowSanitizer(ABIListFiles).runImpl(M);
  }
};
} // namespace

char DataFlowSanitizerLegacyPass::ID;

INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)

// Factory used by clients of the legacy pass manager.
ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
    const std::vector<std::string> &ABIListFiles) {
  return new DataFlowSanitizerLegacyPass(ABIListFiles);
}

// New pass-manager entry point. Conservatively invalidates all analyses when
// the module was modified.
PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
                                             ModuleAnalysisManager &AM) {
  if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}