1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow 12 /// analysis. 13 /// 14 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific 15 /// class of bugs on its own. Instead, it provides a generic dynamic data flow 16 /// analysis framework to be used by clients to help detect application-specific 17 /// issues within their own code. 18 /// 19 /// The analysis is based on automatic propagation of data flow labels (also 20 /// known as taint labels) through a program as it performs computation. Each 21 /// byte of application memory is backed by two bytes of shadow memory which 22 /// hold the label. On Linux/x86_64, memory is laid out as follows: 23 /// 24 /// +--------------------+ 0x800000000000 (top of memory) 25 /// | application memory | 26 /// +--------------------+ 0x700000008000 (kAppAddr) 27 /// | | 28 /// | unused | 29 /// | | 30 /// +--------------------+ 0x200200000000 (kUnusedAddr) 31 /// | union table | 32 /// +--------------------+ 0x200000000000 (kUnionTableAddr) 33 /// | shadow memory | 34 /// +--------------------+ 0x000000010000 (kShadowAddr) 35 /// | reserved by kernel | 36 /// +--------------------+ 0x000000000000 37 /// 38 /// To derive a shadow memory address from an application memory address, 39 /// bits 44-46 are cleared to bring the address into the range 40 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to 41 /// account for the double byte representation of shadow labels and move the 42 /// address into the shadow memory range. See the function 43 /// DataFlowSanitizer::getShadowAddress below. 
44 /// 45 /// For more information, please refer to the design document: 46 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html 47 // 48 //===----------------------------------------------------------------------===// 49 50 #include "llvm/ADT/DenseMap.h" 51 #include "llvm/ADT/DenseSet.h" 52 #include "llvm/ADT/DepthFirstIterator.h" 53 #include "llvm/ADT/None.h" 54 #include "llvm/ADT/SmallPtrSet.h" 55 #include "llvm/ADT/SmallVector.h" 56 #include "llvm/ADT/StringExtras.h" 57 #include "llvm/ADT/StringRef.h" 58 #include "llvm/ADT/Triple.h" 59 #include "llvm/Analysis/ValueTracking.h" 60 #include "llvm/IR/Argument.h" 61 #include "llvm/IR/Attributes.h" 62 #include "llvm/IR/BasicBlock.h" 63 #include "llvm/IR/CallSite.h" 64 #include "llvm/IR/Constant.h" 65 #include "llvm/IR/Constants.h" 66 #include "llvm/IR/DataLayout.h" 67 #include "llvm/IR/DerivedTypes.h" 68 #include "llvm/IR/Dominators.h" 69 #include "llvm/IR/Function.h" 70 #include "llvm/IR/GlobalAlias.h" 71 #include "llvm/IR/GlobalValue.h" 72 #include "llvm/IR/GlobalVariable.h" 73 #include "llvm/IR/IRBuilder.h" 74 #include "llvm/IR/InlineAsm.h" 75 #include "llvm/IR/InstVisitor.h" 76 #include "llvm/IR/InstrTypes.h" 77 #include "llvm/IR/Instruction.h" 78 #include "llvm/IR/Instructions.h" 79 #include "llvm/IR/IntrinsicInst.h" 80 #include "llvm/IR/LLVMContext.h" 81 #include "llvm/IR/MDBuilder.h" 82 #include "llvm/IR/Module.h" 83 #include "llvm/IR/Type.h" 84 #include "llvm/IR/User.h" 85 #include "llvm/IR/Value.h" 86 #include "llvm/Pass.h" 87 #include "llvm/Support/Casting.h" 88 #include "llvm/Support/CommandLine.h" 89 #include "llvm/Support/ErrorHandling.h" 90 #include "llvm/Support/SpecialCaseList.h" 91 #include "llvm/Transforms/Instrumentation.h" 92 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 93 #include "llvm/Transforms/Utils/Local.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstddef> 97 #include <cstdint> 98 #include <iterator> 99 #include <memory> 100 #include <set> 101 #include <string> 
#include <utility>
#include <vector>

using namespace llvm;

// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer
// the runtime will set the external mask based on the VMA range.
static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask";

// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct. For example,
// if the input IR contains a load with alignment 8, this flag will cause
// the shadow load to have alignment 16. This flag is disabled by default as
// we have unfortunately encountered too much code (including Clang itself;
// see PR14291) which performs misaligned access.
static cl::opt<bool> ClPreserveAlignment(
    "dfsan-preserve-alignment",
    cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
    cl::init(false));

// The ABI list files control how shadow parameters are passed. The pass treats
// every function labelled "uninstrumented" in the ABI list file as conforming
// to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
// additional annotations for those functions, a call to one of those functions
// will produce a warning message, as the labelling behaviour of the function is
// unknown. The other supported annotations are "functional" and "discard",
// which are described below under DataFlowSanitizer::WrapperKind.
static cl::list<std::string> ClABIListFiles(
    "dfsan-abilist",
    cl::desc("File listing native ABI functions and how the pass treats them"),
    cl::Hidden);

// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool> ClArgsABI(
    "dfsan-args-abi",
    cl::desc("Use the argument ABI rather than the TLS ABI"),
    cl::Hidden);

// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
static cl::opt<bool> ClCombinePointerLabelsOnLoad(
    "dfsan-combine-pointer-labels-on-load",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "loading from memory."),
    cl::Hidden, cl::init(true));

// Controls whether the pass includes or ignores the labels of pointers in
// stores instructions.
static cl::opt<bool> ClCombinePointerLabelsOnStore(
    "dfsan-combine-pointer-labels-on-store",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "storing in memory."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDebugNonzeroLabels(
    "dfsan-debug-nonzero-labels",
    cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
             "load or return with a nonzero label"),
    cl::Hidden);

// Returns the name of G's value type when it is a named struct, for matching
// "type" entries in the ABI list; otherwise returns a placeholder, since
// non-struct and literal-struct types carry no usable name.
static StringRef GetGlobalTypeString(const GlobalValue &G) {
  // Types of GlobalVariables are always pointer types.
  Type *GType = G.getValueType();
  // For now we support blacklisting struct types only.
  if (StructType *SGType = dyn_cast<StructType>(GType)) {
    if (!SGType->isLiteral())
      return SGType->getName();
  }
  return "<unknown type>";
}

namespace {

// Thin wrapper over a SpecialCaseList that answers "is this entity listed in
// the given category of the dataflow ABI list?" for functions, global
// aliases and whole modules.
class DFSanABIList {
  // NOTE(review): SCL is null until set() is called; all isIn() overloads
  // dereference it unconditionally, so set() must run first — confirm callers.
  std::unique_ptr<SpecialCaseList> SCL;

public:
  DFSanABIList() = default;

  void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }

  /// Returns whether either this function or its source file are listed in the
  /// given category.
  bool isIn(const Function &F, StringRef Category) const {
    return isIn(*F.getParent(), Category) ||
           SCL->inSection("dataflow", "fun", F.getName(), Category);
  }

  /// Returns whether this global alias is listed in the given category.
  ///
  /// If GA aliases a function, the alias's name is matched as a function name
  /// would be. Similarly, aliases of globals are matched like globals.
  bool isIn(const GlobalAlias &GA, StringRef Category) const {
    if (isIn(*GA.getParent(), Category))
      return true;

    if (isa<FunctionType>(GA.getValueType()))
      return SCL->inSection("dataflow", "fun", GA.getName(), Category);

    return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
           SCL->inSection("dataflow", "type", GetGlobalTypeString(GA),
                          Category);
  }

  /// Returns whether this module is listed in the given category.
  bool isIn(const Module &M, StringRef Category) const {
    return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
  }
};

class DataFlowSanitizer : public ModulePass {
  friend struct DFSanFunction;
  friend class DFSanVisitor;

  // Width of a shadow label in bits; each application byte is backed by
  // ShadowWidth/8 = 2 bytes of shadow (see ShadowPtrMul below).
  enum {
    ShadowWidth = 16
  };

  /// Which ABI should be used for instrumented functions?
  enum InstrumentedABI {
    /// Argument and return value labels are passed through additional
    /// arguments and by modifying the return type.
    IA_Args,

    /// Argument and return value labels are passed through TLS variables
    /// __dfsan_arg_tls and __dfsan_retval_tls.
    IA_TLS
  };

  /// How should calls to uninstrumented functions be handled?
  enum WrapperKind {
    /// This function is present in an uninstrumented form but we don't know
    /// how it should be handled. Print a warning and call the function anyway.
    /// Don't label the return value.
    WK_Warning,

    /// This function does not write to (user-accessible) memory, and its return
    /// value is unlabelled.
    WK_Discard,

    /// This function does not write to (user-accessible) memory, and the label
    /// of its return value is the union of the label of its arguments.
    WK_Functional,

    /// Instead of calling the function, a custom wrapper __dfsw_F is called,
    /// where F is the name of the function. This function may wrap the
    /// original function or provide its own implementation. This is similar to
    /// the IA_Args ABI, except that IA_Args uses a struct return type to
    /// pass the return value shadow in a register, while WK_Custom uses an
    /// extra pointer argument to return the shadow. This allows the wrapped
    /// form of the function type to be expressed in C.
    WK_Custom
  };

  Module *Mod;
  LLVMContext *Ctx;
  IntegerType *ShadowTy;      // integer type of width ShadowWidth
  PointerType *ShadowPtrTy;
  IntegerType *IntptrTy;
  ConstantInt *ZeroShadow;    // the zero (unlabelled) shadow value
  ConstantInt *ShadowPtrMask; // fixed app->shadow address mask (per-target)
  ConstantInt *ShadowPtrMul;  // shadow bytes per app byte (ShadowWidth / 8)
  Constant *ArgTLS;
  Constant *RetvalTLS;
  // Optional accessor callbacks supplied at construction; when set they are
  // used (via GetArgTLS/GetRetvalTLS below) instead of the TLS globals.
  void *(*GetArgTLSPtr)();
  void *(*GetRetvalTLSPtr)();
  Constant *GetArgTLS;
  Constant *GetRetvalTLS;
  Constant *ExternalShadowMask; // runtime-set mask for multi-VMA targets
  // Types and declarations of the dfsan runtime support routines.
  FunctionType *DFSanUnionFnTy;
  FunctionType *DFSanUnionLoadFnTy;
  FunctionType *DFSanUnimplementedFnTy;
  FunctionType *DFSanSetLabelFnTy;
  FunctionType *DFSanNonzeroLabelFnTy;
  FunctionType *DFSanVarargWrapperFnTy;
  Constant *DFSanUnionFn;
  Constant *DFSanCheckedUnionFn;
  Constant *DFSanUnionLoadFn;
  Constant *DFSanUnimplementedFn;
  Constant *DFSanSetLabelFn;
  Constant *DFSanNonzeroLabelFn;
  Constant *DFSanVarargWrapperFn;
  MDNode *ColdCallWeights;
  DFSanABIList ABIList;
  // Maps each wrapper function constant back to the original wrapped function.
  DenseMap<Value *, Function *> UnwrappedFnMap;
  AttrBuilder ReadOnlyNoneAttrs;
  // True when the shadow mask must be loaded from the runtime symbol
  // __dfsan_shadow_ptr_mask instead of being a compile-time constant.
  bool DFSanRuntimeShadowMask = false;

  Value *getShadowAddress(Value *Addr, Instruction *Pos);
  bool isInstrumented(const Function *F);
  bool isInstrumented(const GlobalAlias *GA);
  FunctionType *getArgsFunctionType(FunctionType *T);
  FunctionType *getTrampolineFunctionType(FunctionType *T);
  FunctionType *getCustomFunctionType(FunctionType *T);
  InstrumentedABI getInstrumentedABI();
  WrapperKind getWrapperKind(Function *F);
  void addGlobalNamePrefix(GlobalValue *GV);
  Function *buildWrapperFunction(Function *F, StringRef NewFName,
                                 GlobalValue::LinkageTypes NewFLink,
                                 FunctionType *NewFT);
  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);

public:
  static char ID;

  DataFlowSanitizer(
      const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
      void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);

  bool doInitialization(Module &M) override;
  bool runOnModule(Module &M) override;
};

// Per-function instrumentation state shared between DataFlowSanitizer and
// DFSanVisitor while a single function is being instrumented.
struct DFSanFunction {
  DataFlowSanitizer &DFS;
  Function *F;
  DominatorTree DT;
  DataFlowSanitizer::InstrumentedABI IA;
  bool IsNativeABI; // true when F must keep the uninstrumented ABI
  Value *ArgTLSPtr = nullptr;
  Value *RetvalTLSPtr = nullptr;
  AllocaInst *LabelReturnAlloca = nullptr;
  DenseMap<Value *, Value *> ValShadowMap;              // value -> its shadow
  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap; // alloca -> shadow alloca
  // Phi shadows that must be completed after all blocks have been visited;
  // see the fixup loop in runOnModule.
  std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
  DenseSet<Instruction *> SkipInsts; // instructions the visitor must not touch
  std::vector<Value *> NonZeroChecks;
  bool AvoidNewBlocks;

  struct CachedCombinedShadow {
    BasicBlock *Block;
    Value *Shadow;
  };
  DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
      CachedCombinedShadows;
  DenseMap<Value *, std::set<Value *>> ShadowElements;

  DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
    DT.recalculate(*F);
    // FIXME: Need to track down the register allocator issue which causes poor
    // performance in pathological cases with large numbers of basic blocks.
    AvoidNewBlocks = F->size() > 1000;
  }

  Value *getArgTLSPtr();
  Value *getArgTLS(unsigned Index, Instruction *Pos);
  Value *getRetvalTLS();
  Value *getShadow(Value *V);
  void setShadow(Instruction *I, Value *Shadow);
  Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  Value *combineOperandShadows(Instruction *Inst);
  Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
                    Instruction *Pos);
  void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
                   Instruction *Pos);
};

// Instruction visitor that performs the actual shadow propagation for one
// function; state lives in the referenced DFSanFunction.
class DFSanVisitor : public InstVisitor<DFSanVisitor> {
public:
  DFSanFunction &DFSF;

  DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}

  const DataLayout &getDataLayout() const {
    return DFSF.F->getParent()->getDataLayout();
  }

  void visitOperandShadowInst(Instruction &I);
  void visitBinaryOperator(BinaryOperator &BO);
  void visitCastInst(CastInst &CI);
  void visitCmpInst(CmpInst &CI);
  void visitGetElementPtrInst(GetElementPtrInst &GEPI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitReturnInst(ReturnInst &RI);
  void visitCallSite(CallSite CS);
  void visitPHINode(PHINode &PN);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitShuffleVectorInst(ShuffleVectorInst &I);
  void visitExtractValueInst(ExtractValueInst &I);
  void visitInsertValueInst(InsertValueInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitSelectInst(SelectInst &I);
  void visitMemSetInst(MemSetInst &I);
  void visitMemTransferInst(MemTransferInst &I);
};

} // end anonymous namespace

char DataFlowSanitizer::ID;

INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)

ModulePass *
llvm::createDataFlowSanitizerPass(const std::vector<std::string>
&ABIListFiles, 402 void *(*getArgTLS)(), 403 void *(*getRetValTLS)()) { 404 return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS); 405 } 406 407 DataFlowSanitizer::DataFlowSanitizer( 408 const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(), 409 void *(*getRetValTLS)()) 410 : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS) { 411 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles)); 412 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(), 413 ClABIListFiles.end()); 414 ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles)); 415 } 416 417 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) { 418 SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end()); 419 ArgTypes.append(T->getNumParams(), ShadowTy); 420 if (T->isVarArg()) 421 ArgTypes.push_back(ShadowPtrTy); 422 Type *RetType = T->getReturnType(); 423 if (!RetType->isVoidTy()) 424 RetType = StructType::get(RetType, ShadowTy); 425 return FunctionType::get(RetType, ArgTypes, T->isVarArg()); 426 } 427 428 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) { 429 assert(!T->isVarArg()); 430 SmallVector<Type *, 4> ArgTypes; 431 ArgTypes.push_back(T->getPointerTo()); 432 ArgTypes.append(T->param_begin(), T->param_end()); 433 ArgTypes.append(T->getNumParams(), ShadowTy); 434 Type *RetType = T->getReturnType(); 435 if (!RetType->isVoidTy()) 436 ArgTypes.push_back(ShadowPtrTy); 437 return FunctionType::get(T->getReturnType(), ArgTypes, false); 438 } 439 440 FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) { 441 SmallVector<Type *, 4> ArgTypes; 442 for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end(); 443 i != e; ++i) { 444 FunctionType *FT; 445 if (isa<PointerType>(*i) && (FT = dyn_cast<FunctionType>(cast<PointerType>( 446 *i)->getElementType()))) { 447 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo()); 448 
ArgTypes.push_back(Type::getInt8PtrTy(*Ctx)); 449 } else { 450 ArgTypes.push_back(*i); 451 } 452 } 453 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) 454 ArgTypes.push_back(ShadowTy); 455 if (T->isVarArg()) 456 ArgTypes.push_back(ShadowPtrTy); 457 Type *RetType = T->getReturnType(); 458 if (!RetType->isVoidTy()) 459 ArgTypes.push_back(ShadowPtrTy); 460 return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()); 461 } 462 463 bool DataFlowSanitizer::doInitialization(Module &M) { 464 Triple TargetTriple(M.getTargetTriple()); 465 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64; 466 bool IsMIPS64 = TargetTriple.getArch() == Triple::mips64 || 467 TargetTriple.getArch() == Triple::mips64el; 468 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 || 469 TargetTriple.getArch() == Triple::aarch64_be; 470 471 const DataLayout &DL = M.getDataLayout(); 472 473 Mod = &M; 474 Ctx = &M.getContext(); 475 ShadowTy = IntegerType::get(*Ctx, ShadowWidth); 476 ShadowPtrTy = PointerType::getUnqual(ShadowTy); 477 IntptrTy = DL.getIntPtrType(*Ctx); 478 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0); 479 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8); 480 if (IsX86_64) 481 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL); 482 else if (IsMIPS64) 483 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL); 484 // AArch64 supports multiple VMAs and the shadow mask is set at runtime. 
485 else if (IsAArch64) 486 DFSanRuntimeShadowMask = true; 487 else 488 report_fatal_error("unsupported triple"); 489 490 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy }; 491 DFSanUnionFnTy = 492 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false); 493 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy }; 494 DFSanUnionLoadFnTy = 495 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false); 496 DFSanUnimplementedFnTy = FunctionType::get( 497 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false); 498 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy }; 499 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), 500 DFSanSetLabelArgs, /*isVarArg=*/false); 501 DFSanNonzeroLabelFnTy = FunctionType::get( 502 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false); 503 DFSanVarargWrapperFnTy = FunctionType::get( 504 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false); 505 506 if (GetArgTLSPtr) { 507 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64); 508 ArgTLS = nullptr; 509 GetArgTLS = ConstantExpr::getIntToPtr( 510 ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)), 511 PointerType::getUnqual( 512 FunctionType::get(PointerType::getUnqual(ArgTLSTy), false))); 513 } 514 if (GetRetvalTLSPtr) { 515 RetvalTLS = nullptr; 516 GetRetvalTLS = ConstantExpr::getIntToPtr( 517 ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)), 518 PointerType::getUnqual( 519 FunctionType::get(PointerType::getUnqual(ShadowTy), false))); 520 } 521 522 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000); 523 return true; 524 } 525 526 bool DataFlowSanitizer::isInstrumented(const Function *F) { 527 return !ABIList.isIn(*F, "uninstrumented"); 528 } 529 530 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) { 531 return !ABIList.isIn(*GA, "uninstrumented"); 532 } 533 534 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() { 535 return ClArgsABI ? 
IA_Args : IA_TLS; 536 } 537 538 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) { 539 if (ABIList.isIn(*F, "functional")) 540 return WK_Functional; 541 if (ABIList.isIn(*F, "discard")) 542 return WK_Discard; 543 if (ABIList.isIn(*F, "custom")) 544 return WK_Custom; 545 546 return WK_Warning; 547 } 548 549 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) { 550 std::string GVName = GV->getName(), Prefix = "dfs$"; 551 GV->setName(Prefix + GVName); 552 553 // Try to change the name of the function in module inline asm. We only do 554 // this for specific asm directives, currently only ".symver", to try to avoid 555 // corrupting asm which happens to contain the symbol name as a substring. 556 // Note that the substitution for .symver assumes that the versioned symbol 557 // also has an instrumented name. 558 std::string Asm = GV->getParent()->getModuleInlineAsm(); 559 std::string SearchStr = ".symver " + GVName + ","; 560 size_t Pos = Asm.find(SearchStr); 561 if (Pos != std::string::npos) { 562 Asm.replace(Pos, SearchStr.size(), 563 ".symver " + Prefix + GVName + "," + Prefix); 564 GV->getParent()->setModuleInlineAsm(Asm); 565 } 566 } 567 568 Function * 569 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName, 570 GlobalValue::LinkageTypes NewFLink, 571 FunctionType *NewFT) { 572 FunctionType *FT = F->getFunctionType(); 573 Function *NewF = Function::Create(NewFT, NewFLink, NewFName, 574 F->getParent()); 575 NewF->copyAttributesFrom(F); 576 NewF->removeAttributes( 577 AttributeList::ReturnIndex, 578 AttributeFuncs::typeIncompatible(NewFT->getReturnType())); 579 580 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF); 581 if (F->isVarArg()) { 582 NewF->removeAttributes(AttributeList::FunctionIndex, 583 AttrBuilder().addAttribute("split-stack")); 584 CallInst::Create(DFSanVarargWrapperFn, 585 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "", 586 BB); 587 new UnreachableInst(*Ctx, BB); 588 } else { 
    // Non-vararg case: forward all parameters of the original signature and
    // return whatever F returns. Any extra shadow parameters of NewFT are
    // deliberately left unused here; instrumentation fills them in later.
    std::vector<Value *> Args;
    unsigned n = FT->getNumParams();
    for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
      Args.push_back(&*ai);
    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}

// Returns (building on first use) the trampoline FName for function type FT.
// The trampoline receives the real callee as its first argument, calls it
// with the original parameters, instruments that call under the native ABI,
// and stores the return shadow through the trailing pointer argument.
Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
                                                          StringRef FName) {
  FunctionType *FTT = getTrampolineFunctionType(FT);
  Constant *C = Mod->getOrInsertFunction(FName, FTT);
  Function *F = dyn_cast<Function>(C);
  // Only build the body once; a non-declaration means a previous call (or
  // another TU in this module) already built it.
  if (F && F->isDeclaration()) {
    F->setLinkage(GlobalValue::LinkOnceODRLinkage);
    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    std::vector<Value *> Args;
    Function::arg_iterator AI = F->arg_begin(); ++AI; // skip the callee arg
    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
      Args.push_back(&*AI);
    // Indirect call through the first trampoline argument (the real callee).
    CallInst *CI = CallInst::Create(&*F->arg_begin(), Args, "", BB);
    ReturnInst *RI;
    if (FT->getReturnType()->isVoidTy())
      RI = ReturnInst::Create(*Ctx, BB);
    else
      RI = ReturnInst::Create(*Ctx, CI, BB);

    // Seed the shadow map so each value argument uses the caller-passed
    // shadow argument (ShadowAI starts where the value arguments ended),
    // then instrument the inner call in native-ABI mode.
    DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
      DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
    DFSanVisitor(DFSF).visitCallInst(*CI);
    // Write the return shadow through the trailing shadow-return pointer.
    if (!FT->getReturnType()->isVoidTy())
      new StoreInst(DFSF.getShadow(RI->getReturnValue()),
                    &*std::prev(F->arg_end()), RI);
  }

  return C;
}

// Main entry point: declares the runtime interface, rewrites function ABIs
// and aliases, then instruments every function body.
bool DataFlowSanitizer::runOnModule(Module &M) {
  if (ABIList.isIn(M, "skip"))
    return false;

  // Create the TLS globals unless the user supplied accessor callbacks in
  // doInitialization.
  if (!GetArgTLSPtr) {
    Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
    ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
    if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS))
      G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }
  if (!GetRetvalTLSPtr) {
    RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
    if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS))
      G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }

  ExternalShadowMask =
      Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);

  // Declare the runtime support functions and mark their attributes. The
  // dyn_cast guards handle the case where a symbol already exists with a
  // conflicting type (getOrInsertFunction then returns a bitcast constant).
  DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
  if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
    F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
    F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
    F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    F->addParamAttr(0, Attribute::ZExt);
    F->addParamAttr(1, Attribute::ZExt);
  }
  DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
  if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
    F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
    F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
    F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    F->addParamAttr(0, Attribute::ZExt);
    F->addParamAttr(1, Attribute::ZExt);
  }
  DFSanUnionLoadFn =
      Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
  if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
    F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
    F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
    F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  DFSanSetLabelFn =
      Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy);
  if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) {
    F->addParamAttr(0, Attribute::ZExt);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);

  // Collect every function that is not an intrinsic or one of our own
  // runtime declarations.
  std::vector<Function *> FnsToInstrument;
  SmallPtrSet<Function *, 2> FnsWithNativeABI;
  for (Function &i : M) {
    if (!i.isIntrinsic() &&
        &i != DFSanUnionFn &&
        &i != DFSanCheckedUnionFn &&
        &i != DFSanUnionLoadFn &&
        &i != DFSanUnimplementedFn &&
        &i != DFSanSetLabelFn &&
        &i != DFSanNonzeroLabelFn &&
        &i != DFSanVarargWrapperFn)
      FnsToInstrument.push_back(&i);
  }

  // Give function aliases prefixes when necessary, and build wrappers where the
  // instrumentedness is inconsistent.
  for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
    GlobalAlias *GA = &*i;
    ++i; // advance before possibly erasing GA below
    // Don't stop on weak. We assume people aren't playing games with the
    // instrumentedness of overridden weak aliases.
    if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
      bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
      if (GAInst && FInst) {
        addGlobalNamePrefix(GA);
      } else if (GAInst != FInst) {
        // Non-instrumented alias of an instrumented function, or vice versa.
        // Replace the alias with a native-ABI wrapper of the aliasee. The pass
        // below will take care of instrumenting it.
        Function *NewF =
            buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
        GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
        NewF->takeName(GA);
        GA->eraseFromParent();
        FnsToInstrument.push_back(NewF);
      }
    }
  }

  ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
      .addAttribute(Attribute::ReadNone);

  // First, change the ABI of every function in the module. ABI-listed
  // functions keep their original ABI and get a wrapper function.
  for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
                                         e = FnsToInstrument.end();
       i != e; ++i) {
    Function &F = **i;
    FunctionType *FT = F.getFunctionType();

    bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
                              FT->getReturnType()->isVoidTy());

    if (isInstrumented(&F)) {
      // Instrumented functions get a 'dfs$' prefix. This allows us to more
      // easily identify cases of mismatching ABIs.
      if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
        // Recreate the function with the shadow-augmented signature and move
        // its body, arguments and block addresses over to the new function.
        FunctionType *NewFT = getArgsFunctionType(FT);
        Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
        NewF->copyAttributesFrom(&F);
        NewF->removeAttributes(
            AttributeList::ReturnIndex,
            AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
        for (Function::arg_iterator FArg = F.arg_begin(),
                                    NewFArg = NewF->arg_begin(),
                                    FArgEnd = F.arg_end();
             FArg != FArgEnd; ++FArg, ++NewFArg) {
          FArg->replaceAllUsesWith(&*NewFArg);
        }
        NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());

        for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
             UI != UE;) {
          BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
          ++UI; // advance before the RAUW/delete invalidates this user
          if (BA) {
            BA->replaceAllUsesWith(
                BlockAddress::get(NewF, BA->getBasicBlock()));
            delete BA;
          }
        }
        F.replaceAllUsesWith(
            ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
        NewF->takeName(&F);
        F.eraseFromParent();
        *i = NewF;
        addGlobalNamePrefix(NewF);
      } else {
        addGlobalNamePrefix(&F);
      }
    } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
      // Build a wrapper function for F. The wrapper simply calls F, and is
      // added to FnsToInstrument so that any instrumentation according to its
      // WrapperKind is done in the second pass below.
      FunctionType *NewFT = getInstrumentedABI() == IA_Args
                                ? getArgsFunctionType(FT)
                                : FT;
      Function *NewF = buildWrapperFunction(
          &F, std::string("dfsw$") + std::string(F.getName()),
          GlobalValue::LinkOnceODRLinkage, NewFT);
      if (getInstrumentedABI() == IA_TLS)
        NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);

      Value *WrappedFnCst =
          ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
      F.replaceAllUsesWith(WrappedFnCst);

      UnwrappedFnMap[WrappedFnCst] = &F;
      *i = NewF;

      if (!F.isDeclaration()) {
        // This function is probably defining an interposition of an
        // uninstrumented function and hence needs to keep the original ABI.
        // But any functions it may call need to use the instrumented ABI, so
        // we instrument it in a mode which preserves the original ABI.
        FnsWithNativeABI.insert(&F);

        // This code needs to rebuild the iterators, as they may be invalidated
        // by the push_back, taking care that the new range does not include
        // any functions added by this code.
        size_t N = i - FnsToInstrument.begin(),
               Count = e - FnsToInstrument.begin();
        FnsToInstrument.push_back(&F);
        i = FnsToInstrument.begin() + N;
        e = FnsToInstrument.begin() + Count;
      }
      // Hopefully, nobody will try to indirectly call a vararg
      // function... yet.
    } else if (FT->isVarArg()) {
      UnwrappedFnMap[&F] = &F;
      *i = nullptr; // entries nulled here are skipped by the loop below
    }
  }

  // Second pass: instrument every (defined) function body.
  for (Function *i : FnsToInstrument) {
    if (!i || i->isDeclaration())
      continue;

    removeUnreachableBlocks(*i);

    DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));

    // DFSanVisitor may create new basic blocks, which confuses df_iterator.
    // Build a copy of the list before iterating over it.
    SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));

    for (BasicBlock *i : BBList) {
      Instruction *Inst = &i->front();
      while (true) {
        // DFSanVisitor may split the current basic block, changing the current
        // instruction's next pointer and moving the next instruction to the
        // tail block from which we should continue.
        Instruction *Next = Inst->getNextNode();
        // DFSanVisitor may delete Inst, so keep track of whether it was a
        // terminator.
        bool IsTerminator = isa<TerminatorInst>(Inst);
        if (!DFSF.SkipInsts.count(Inst))
          DFSanVisitor(DFSF).visit(Inst);
        if (IsTerminator)
          break;
        Inst = Next;
      }
    }

    // We will not necessarily be able to compute the shadow for every phi node
    // until we have visited every block. Therefore, the code that handles phi
    // nodes adds them to the PHIFixups list so that they can be properly
    // handled here.
    for (std::vector<std::pair<PHINode *, PHINode *>>::iterator
             i = DFSF.PHIFixups.begin(),
             e = DFSF.PHIFixups.end();
         i != e; ++i) {
      for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
           ++val) {
        i->second->setIncomingValue(
            val, DFSF.getShadow(i->first->getIncomingValue(val)));
      }
    }

    // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
    // places (i.e. instructions in basic blocks we haven't even begun visiting
    // yet). To make our life easier, do this work in a pass after the main
    // instrumentation.
    if (ClDebugNonzeroLabels) {
      for (Value *V : DFSF.NonZeroChecks) {
        Instruction *Pos;
        if (Instruction *I = dyn_cast<Instruction>(V))
          Pos = I->getNextNode();
        else
          Pos = &DFSF.F->getEntryBlock().front();
        // Skip past phis and allocas so the check lands on a legal insertion
        // point.
        while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
          Pos = Pos->getNextNode();
        IRBuilder<> IRB(Pos);
        Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
        BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
            Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
        IRBuilder<> ThenIRB(BI);
        ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
      }
    }
  }

  return false;
}

// Returns the per-function argument-TLS pointer, caching it; when accessor
// callbacks are in use the pointer is obtained by a call inserted at the
// function entry.
Value *DFSanFunction::getArgTLSPtr() {
  if (ArgTLSPtr)
    return ArgTLSPtr;
  if (DFS.ArgTLS)
    return ArgTLSPtr = DFS.ArgTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
}

// Same caching scheme as getArgTLSPtr, for the return-value TLS slot.
Value *DFSanFunction::getRetvalTLS() {
  if (RetvalTLSPtr)
    return RetvalTLSPtr;
  if (DFS.RetvalTLS)
    return RetvalTLSPtr = DFS.RetvalTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
}

// Returns a pointer to the Idx'th slot of the argument-TLS array.
Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
  IRBuilder<> IRB(Pos);
  return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx);
}

// Returns (computing lazily) the shadow for V; only arguments and
// instructions can carry a non-zero shadow.
Value *DFSanFunction::getShadow(Value *V) {
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.ZeroShadow;
  Value *&Shadow = ValShadowMap[V];
  if (!Shadow) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      if (IsNativeABI)
        return DFS.ZeroShadow;
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        Value *ArgTLSPtr = getArgTLSPtr();
        Instruction *ArgTLSPos =
            DFS.ArgTLS ?
&*F->getEntryBlock().begin() 932 : cast<Instruction>(ArgTLSPtr)->getNextNode(); 933 IRBuilder<> IRB(ArgTLSPos); 934 Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos)); 935 break; 936 } 937 case DataFlowSanitizer::IA_Args: { 938 unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2; 939 Function::arg_iterator i = F->arg_begin(); 940 while (ArgIdx--) 941 ++i; 942 Shadow = &*i; 943 assert(Shadow->getType() == DFS.ShadowTy); 944 break; 945 } 946 } 947 NonZeroChecks.push_back(Shadow); 948 } else { 949 Shadow = DFS.ZeroShadow; 950 } 951 } 952 return Shadow; 953 } 954 955 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) { 956 assert(!ValShadowMap.count(I)); 957 assert(Shadow->getType() == DFS.ShadowTy); 958 ValShadowMap[I] = Shadow; 959 } 960 961 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) { 962 assert(Addr != RetvalTLS && "Reinstrumenting?"); 963 IRBuilder<> IRB(Pos); 964 Value *ShadowPtrMaskValue; 965 if (DFSanRuntimeShadowMask) 966 ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask); 967 else 968 ShadowPtrMaskValue = ShadowPtrMask; 969 return IRB.CreateIntToPtr( 970 IRB.CreateMul( 971 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy), 972 IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)), 973 ShadowPtrMul), 974 ShadowPtrTy); 975 } 976 977 // Generates IR to compute the union of the two given shadows, inserting it 978 // before Pos. Returns the computed union Value. 
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
  // Trivial cases: unioning with the zero shadow, or with itself, requires
  // no IR at all.
  if (V1 == DFS.ZeroShadow)
    return V2;
  if (V2 == DFS.ZeroShadow)
    return V1;
  if (V1 == V2)
    return V1;

  // If we already know the set of labels each operand represents (recorded
  // in ShadowElements below), we may be able to prove one operand subsumes
  // the other and skip emitting a union entirely.
  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    // Both operands are tracked unions; std::includes is valid because
    // std::set iterates in sorted order.
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return V1;
    } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                             V1Elems->second.begin(), V1Elems->second.end())) {
      return V2;
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return V1;
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return V2;
  }

  // Union is commutative, so canonicalize the cache key by pointer order to
  // get hits for both (V1, V2) and (V2, V1).
  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
  // A previously emitted union is only reusable if its defining block
  // dominates the insertion point; otherwise the value is unavailable there.
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  IRBuilder<> IRB(Pos);
  if (AvoidNewBlocks) {
    // Emit a straight-line call to the runtime union helper rather than
    // splitting the block.
    CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
    // ZExt the i16 shadow at the ABI boundary for targets where ShadowTy is
    // an illegal type (see the matching note in visitCallSite).
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    CCS.Block = Pos->getParent();
    CCS.Shadow = Call;
  } else {
    // Inline fast path: only call __dfsan_union when the two shadows differ,
    // then merge the call result with V1 via a phi in the tail block.
    BasicBlock *Head = Pos->getParent();
    Value *Ne = IRB.CreateICmpNE(V1, V2);
    BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
        Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
    IRBuilder<> ThenIRB(BI);
    CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addParamAttr(0, Attribute::ZExt);
    Call->addParamAttr(1, Attribute::ZExt);

    BasicBlock *Tail = BI->getSuccessor(0);
    PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Phi->addIncoming(Call, Call->getParent());
    // When V1 == V2 the then-branch is skipped and the union is just V1.
    Phi->addIncoming(V1, Head);

    CCS.Block = Tail;
    CCS.Shadow = Phi;
  }

  // Record the label set represented by the new shadow so that future
  // combine requests can be answered by the subsumption checks above.
  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}

// A convenience function which folds the shadows of each of the operands
// of the provided instruction Inst, inserting the IR before Inst. Returns
// the computed union Value.
Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
  // An instruction with no operands cannot carry taint.
  if (Inst->getNumOperands() == 0)
    return DFS.ZeroShadow;

  // Left-fold the operand shadows pairwise.
  Value *Shadow = getShadow(Inst->getOperand(0));
  for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
    Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
  }
  return Shadow;
}

// Default instrumentation for instructions whose result label is simply the
// union of the labels of all of their operands.
void DFSanVisitor::visitOperandShadowInst(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
}

// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
// Addr has alignment Align, and take the union of each of those shadows.
1078 Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align, 1079 Instruction *Pos) { 1080 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { 1081 const auto i = AllocaShadowMap.find(AI); 1082 if (i != AllocaShadowMap.end()) { 1083 IRBuilder<> IRB(Pos); 1084 return IRB.CreateLoad(i->second); 1085 } 1086 } 1087 1088 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8; 1089 SmallVector<Value *, 2> Objs; 1090 GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout()); 1091 bool AllConstants = true; 1092 for (Value *Obj : Objs) { 1093 if (isa<Function>(Obj) || isa<BlockAddress>(Obj)) 1094 continue; 1095 if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant()) 1096 continue; 1097 1098 AllConstants = false; 1099 break; 1100 } 1101 if (AllConstants) 1102 return DFS.ZeroShadow; 1103 1104 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); 1105 switch (Size) { 1106 case 0: 1107 return DFS.ZeroShadow; 1108 case 1: { 1109 LoadInst *LI = new LoadInst(ShadowAddr, "", Pos); 1110 LI->setAlignment(ShadowAlign); 1111 return LI; 1112 } 1113 case 2: { 1114 IRBuilder<> IRB(Pos); 1115 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr, 1116 ConstantInt::get(DFS.IntptrTy, 1)); 1117 return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign), 1118 IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos); 1119 } 1120 } 1121 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) { 1122 // Fast path for the common case where each byte has identical shadow: load 1123 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any 1124 // shadow is non-equal. 
1125 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F); 1126 IRBuilder<> FallbackIRB(FallbackBB); 1127 CallInst *FallbackCall = FallbackIRB.CreateCall( 1128 DFS.DFSanUnionLoadFn, 1129 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)}); 1130 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); 1131 1132 // Compare each of the shadows stored in the loaded 64 bits to each other, 1133 // by computing (WideShadow rotl ShadowWidth) == WideShadow. 1134 IRBuilder<> IRB(Pos); 1135 Value *WideAddr = 1136 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx)); 1137 Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign); 1138 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy); 1139 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth); 1140 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth); 1141 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow); 1142 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow); 1143 1144 BasicBlock *Head = Pos->getParent(); 1145 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator()); 1146 1147 if (DomTreeNode *OldNode = DT.getNode(Head)) { 1148 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end()); 1149 1150 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head); 1151 for (auto Child : Children) 1152 DT.changeImmediateDominator(Child, NewNode); 1153 } 1154 1155 // In the following code LastBr will refer to the previous basic block's 1156 // conditional branch instruction, whose true successor is fixed up to point 1157 // to the next block during the loop below or to the tail after the final 1158 // iteration. 
1159 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq); 1160 ReplaceInstWithInst(Head->getTerminator(), LastBr); 1161 DT.addNewBlock(FallbackBB, Head); 1162 1163 for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size; 1164 Ofs += 64 / DFS.ShadowWidth) { 1165 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F); 1166 DT.addNewBlock(NextBB, LastBr->getParent()); 1167 IRBuilder<> NextIRB(NextBB); 1168 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr, 1169 ConstantInt::get(DFS.IntptrTy, 1)); 1170 Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign); 1171 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow); 1172 LastBr->setSuccessor(0, NextBB); 1173 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB); 1174 } 1175 1176 LastBr->setSuccessor(0, Tail); 1177 FallbackIRB.CreateBr(Tail); 1178 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front()); 1179 Shadow->addIncoming(FallbackCall, FallbackBB); 1180 Shadow->addIncoming(TruncShadow, LastBr->getParent()); 1181 return Shadow; 1182 } 1183 1184 IRBuilder<> IRB(Pos); 1185 CallInst *FallbackCall = IRB.CreateCall( 1186 DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)}); 1187 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt); 1188 return FallbackCall; 1189 } 1190 1191 void DFSanVisitor::visitLoadInst(LoadInst &LI) { 1192 auto &DL = LI.getModule()->getDataLayout(); 1193 uint64_t Size = DL.getTypeStoreSize(LI.getType()); 1194 if (Size == 0) { 1195 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow); 1196 return; 1197 } 1198 1199 uint64_t Align; 1200 if (ClPreserveAlignment) { 1201 Align = LI.getAlignment(); 1202 if (Align == 0) 1203 Align = DL.getABITypeAlignment(LI.getType()); 1204 } else { 1205 Align = 1; 1206 } 1207 IRBuilder<> IRB(&LI); 1208 Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI); 1209 if (ClCombinePointerLabelsOnLoad) { 1210 Value *PtrShadow = 
DFSF.getShadow(LI.getPointerOperand()); 1211 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI); 1212 } 1213 if (Shadow != DFSF.DFS.ZeroShadow) 1214 DFSF.NonZeroChecks.push_back(Shadow); 1215 1216 DFSF.setShadow(&LI, Shadow); 1217 } 1218 1219 void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align, 1220 Value *Shadow, Instruction *Pos) { 1221 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { 1222 const auto i = AllocaShadowMap.find(AI); 1223 if (i != AllocaShadowMap.end()) { 1224 IRBuilder<> IRB(Pos); 1225 IRB.CreateStore(Shadow, i->second); 1226 return; 1227 } 1228 } 1229 1230 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8; 1231 IRBuilder<> IRB(Pos); 1232 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); 1233 if (Shadow == DFS.ZeroShadow) { 1234 IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth); 1235 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0); 1236 Value *ExtShadowAddr = 1237 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy)); 1238 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign); 1239 return; 1240 } 1241 1242 const unsigned ShadowVecSize = 128 / DFS.ShadowWidth; 1243 uint64_t Offset = 0; 1244 if (Size >= ShadowVecSize) { 1245 VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize); 1246 Value *ShadowVec = UndefValue::get(ShadowVecTy); 1247 for (unsigned i = 0; i != ShadowVecSize; ++i) { 1248 ShadowVec = IRB.CreateInsertElement( 1249 ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i)); 1250 } 1251 Value *ShadowVecAddr = 1252 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy)); 1253 do { 1254 Value *CurShadowVecAddr = 1255 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset); 1256 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign); 1257 Size -= ShadowVecSize; 1258 ++Offset; 1259 } while (Size >= ShadowVecSize); 1260 Offset *= ShadowVecSize; 1261 } 1262 while (Size > 0) { 1263 Value *CurShadowAddr = 1264 
IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset); 1265 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign); 1266 --Size; 1267 ++Offset; 1268 } 1269 } 1270 1271 void DFSanVisitor::visitStoreInst(StoreInst &SI) { 1272 auto &DL = SI.getModule()->getDataLayout(); 1273 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType()); 1274 if (Size == 0) 1275 return; 1276 1277 uint64_t Align; 1278 if (ClPreserveAlignment) { 1279 Align = SI.getAlignment(); 1280 if (Align == 0) 1281 Align = DL.getABITypeAlignment(SI.getValueOperand()->getType()); 1282 } else { 1283 Align = 1; 1284 } 1285 1286 Value* Shadow = DFSF.getShadow(SI.getValueOperand()); 1287 if (ClCombinePointerLabelsOnStore) { 1288 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand()); 1289 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI); 1290 } 1291 DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI); 1292 } 1293 1294 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) { 1295 visitOperandShadowInst(BO); 1296 } 1297 1298 void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); } 1299 1300 void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); } 1301 1302 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) { 1303 visitOperandShadowInst(GEPI); 1304 } 1305 1306 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) { 1307 visitOperandShadowInst(I); 1308 } 1309 1310 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) { 1311 visitOperandShadowInst(I); 1312 } 1313 1314 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) { 1315 visitOperandShadowInst(I); 1316 } 1317 1318 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) { 1319 visitOperandShadowInst(I); 1320 } 1321 1322 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) { 1323 visitOperandShadowInst(I); 1324 } 1325 1326 void DFSanVisitor::visitAllocaInst(AllocaInst &I) { 1327 bool AllLoadsStores = true; 1328 for 
(User *U : I.users()) { 1329 if (isa<LoadInst>(U)) 1330 continue; 1331 1332 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 1333 if (SI->getPointerOperand() == &I) 1334 continue; 1335 } 1336 1337 AllLoadsStores = false; 1338 break; 1339 } 1340 if (AllLoadsStores) { 1341 IRBuilder<> IRB(&I); 1342 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy); 1343 } 1344 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow); 1345 } 1346 1347 void DFSanVisitor::visitSelectInst(SelectInst &I) { 1348 Value *CondShadow = DFSF.getShadow(I.getCondition()); 1349 Value *TrueShadow = DFSF.getShadow(I.getTrueValue()); 1350 Value *FalseShadow = DFSF.getShadow(I.getFalseValue()); 1351 1352 if (isa<VectorType>(I.getCondition()->getType())) { 1353 DFSF.setShadow( 1354 &I, 1355 DFSF.combineShadows( 1356 CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I)); 1357 } else { 1358 Value *ShadowSel; 1359 if (TrueShadow == FalseShadow) { 1360 ShadowSel = TrueShadow; 1361 } else { 1362 ShadowSel = 1363 SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I); 1364 } 1365 DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I)); 1366 } 1367 } 1368 1369 void DFSanVisitor::visitMemSetInst(MemSetInst &I) { 1370 IRBuilder<> IRB(&I); 1371 Value *ValShadow = DFSF.getShadow(I.getValue()); 1372 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn, 1373 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy( 1374 *DFSF.DFS.Ctx)), 1375 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)}); 1376 } 1377 1378 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) { 1379 IRBuilder<> IRB(&I); 1380 Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I); 1381 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I); 1382 Value *LenShadow = IRB.CreateMul( 1383 I.getLength(), 1384 ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8)); 1385 Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx); 1386 DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr); 1387 
SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr); 1388 auto *MTI = cast<MemTransferInst>( 1389 IRB.CreateCall(I.getCalledValue(), 1390 {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()})); 1391 // FIXME: Set the source & dest alignments of MTI based on the separate 1392 // source & dest alignments of I 1393 if (ClPreserveAlignment) { 1394 MTI->setAlignment(I.getAlignment() * (DFSF.DFS.ShadowWidth / 8)); 1395 } else { 1396 MTI->setAlignment(DFSF.DFS.ShadowWidth / 8); 1397 } 1398 } 1399 1400 void DFSanVisitor::visitReturnInst(ReturnInst &RI) { 1401 if (!DFSF.IsNativeABI && RI.getReturnValue()) { 1402 switch (DFSF.IA) { 1403 case DataFlowSanitizer::IA_TLS: { 1404 Value *S = DFSF.getShadow(RI.getReturnValue()); 1405 IRBuilder<> IRB(&RI); 1406 IRB.CreateStore(S, DFSF.getRetvalTLS()); 1407 break; 1408 } 1409 case DataFlowSanitizer::IA_Args: { 1410 IRBuilder<> IRB(&RI); 1411 Type *RT = DFSF.F->getFunctionType()->getReturnType(); 1412 Value *InsVal = 1413 IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0); 1414 Value *InsShadow = 1415 IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1); 1416 RI.setOperand(0, InsShadow); 1417 break; 1418 } 1419 } 1420 } 1421 } 1422 1423 void DFSanVisitor::visitCallSite(CallSite CS) { 1424 Function *F = CS.getCalledFunction(); 1425 if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) { 1426 visitOperandShadowInst(*CS.getInstruction()); 1427 return; 1428 } 1429 1430 // Calls to this function are synthesized in wrappers, and we shouldn't 1431 // instrument them. 
1432 if (F == DFSF.DFS.DFSanVarargWrapperFn) 1433 return; 1434 1435 IRBuilder<> IRB(CS.getInstruction()); 1436 1437 DenseMap<Value *, Function *>::iterator i = 1438 DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue()); 1439 if (i != DFSF.DFS.UnwrappedFnMap.end()) { 1440 Function *F = i->second; 1441 switch (DFSF.DFS.getWrapperKind(F)) { 1442 case DataFlowSanitizer::WK_Warning: 1443 CS.setCalledFunction(F); 1444 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn, 1445 IRB.CreateGlobalStringPtr(F->getName())); 1446 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow); 1447 return; 1448 case DataFlowSanitizer::WK_Discard: 1449 CS.setCalledFunction(F); 1450 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow); 1451 return; 1452 case DataFlowSanitizer::WK_Functional: 1453 CS.setCalledFunction(F); 1454 visitOperandShadowInst(*CS.getInstruction()); 1455 return; 1456 case DataFlowSanitizer::WK_Custom: 1457 // Don't try to handle invokes of custom functions, it's too complicated. 1458 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_ 1459 // wrapper. 1460 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) { 1461 FunctionType *FT = F->getFunctionType(); 1462 FunctionType *CustomFT = DFSF.DFS.getCustomFunctionType(FT); 1463 std::string CustomFName = "__dfsw_"; 1464 CustomFName += F->getName(); 1465 Constant *CustomF = 1466 DFSF.DFS.Mod->getOrInsertFunction(CustomFName, CustomFT); 1467 if (Function *CustomFn = dyn_cast<Function>(CustomF)) { 1468 CustomFn->copyAttributesFrom(F); 1469 1470 // Custom functions returning non-void will write to the return label. 
1471 if (!FT->getReturnType()->isVoidTy()) { 1472 CustomFn->removeAttributes(AttributeList::FunctionIndex, 1473 DFSF.DFS.ReadOnlyNoneAttrs); 1474 } 1475 } 1476 1477 std::vector<Value *> Args; 1478 1479 CallSite::arg_iterator i = CS.arg_begin(); 1480 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) { 1481 Type *T = (*i)->getType(); 1482 FunctionType *ParamFT; 1483 if (isa<PointerType>(T) && 1484 (ParamFT = dyn_cast<FunctionType>( 1485 cast<PointerType>(T)->getElementType()))) { 1486 std::string TName = "dfst"; 1487 TName += utostr(FT->getNumParams() - n); 1488 TName += "$"; 1489 TName += F->getName(); 1490 Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName); 1491 Args.push_back(T); 1492 Args.push_back( 1493 IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx))); 1494 } else { 1495 Args.push_back(*i); 1496 } 1497 } 1498 1499 i = CS.arg_begin(); 1500 const unsigned ShadowArgStart = Args.size(); 1501 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) 1502 Args.push_back(DFSF.getShadow(*i)); 1503 1504 if (FT->isVarArg()) { 1505 auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy, 1506 CS.arg_size() - FT->getNumParams()); 1507 auto *LabelVAAlloca = new AllocaInst( 1508 LabelVATy, getDataLayout().getAllocaAddrSpace(), 1509 "labelva", &DFSF.F->getEntryBlock().front()); 1510 1511 for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) { 1512 auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n); 1513 IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr); 1514 } 1515 1516 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0)); 1517 } 1518 1519 if (!FT->getReturnType()->isVoidTy()) { 1520 if (!DFSF.LabelReturnAlloca) { 1521 DFSF.LabelReturnAlloca = 1522 new AllocaInst(DFSF.DFS.ShadowTy, 1523 getDataLayout().getAllocaAddrSpace(), 1524 "labelreturn", &DFSF.F->getEntryBlock().front()); 1525 } 1526 Args.push_back(DFSF.LabelReturnAlloca); 1527 } 1528 1529 for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i) 1530 
Args.push_back(*i); 1531 1532 CallInst *CustomCI = IRB.CreateCall(CustomF, Args); 1533 CustomCI->setCallingConv(CI->getCallingConv()); 1534 CustomCI->setAttributes(CI->getAttributes()); 1535 1536 // Update the parameter attributes of the custom call instruction to 1537 // zero extend the shadow parameters. This is required for targets 1538 // which consider ShadowTy an illegal type. 1539 for (unsigned n = 0; n < FT->getNumParams(); n++) { 1540 const unsigned ArgNo = ShadowArgStart + n; 1541 if (CustomCI->getArgOperand(ArgNo)->getType() == DFSF.DFS.ShadowTy) 1542 CustomCI->addParamAttr(ArgNo, Attribute::ZExt); 1543 } 1544 1545 if (!FT->getReturnType()->isVoidTy()) { 1546 LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca); 1547 DFSF.setShadow(CustomCI, LabelLoad); 1548 } 1549 1550 CI->replaceAllUsesWith(CustomCI); 1551 CI->eraseFromParent(); 1552 return; 1553 } 1554 break; 1555 } 1556 } 1557 1558 FunctionType *FT = cast<FunctionType>( 1559 CS.getCalledValue()->getType()->getPointerElementType()); 1560 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) { 1561 for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) { 1562 IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)), 1563 DFSF.getArgTLS(i, CS.getInstruction())); 1564 } 1565 } 1566 1567 Instruction *Next = nullptr; 1568 if (!CS.getType()->isVoidTy()) { 1569 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) { 1570 if (II->getNormalDest()->getSinglePredecessor()) { 1571 Next = &II->getNormalDest()->front(); 1572 } else { 1573 BasicBlock *NewBB = 1574 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT); 1575 Next = &NewBB->front(); 1576 } 1577 } else { 1578 assert(CS->getIterator() != CS->getParent()->end()); 1579 Next = CS->getNextNode(); 1580 } 1581 1582 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) { 1583 IRBuilder<> NextIRB(Next); 1584 LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS()); 1585 DFSF.SkipInsts.insert(LI); 1586 
DFSF.setShadow(CS.getInstruction(), LI); 1587 DFSF.NonZeroChecks.push_back(LI); 1588 } 1589 } 1590 1591 // Do all instrumentation for IA_Args down here to defer tampering with the 1592 // CFG in a way that SplitEdge may be able to detect. 1593 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) { 1594 FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT); 1595 Value *Func = 1596 IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT)); 1597 std::vector<Value *> Args; 1598 1599 CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 1600 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) 1601 Args.push_back(*i); 1602 1603 i = CS.arg_begin(); 1604 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) 1605 Args.push_back(DFSF.getShadow(*i)); 1606 1607 if (FT->isVarArg()) { 1608 unsigned VarArgSize = CS.arg_size() - FT->getNumParams(); 1609 ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize); 1610 AllocaInst *VarArgShadow = 1611 new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(), 1612 "", &DFSF.F->getEntryBlock().front()); 1613 Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0)); 1614 for (unsigned n = 0; i != e; ++i, ++n) { 1615 IRB.CreateStore( 1616 DFSF.getShadow(*i), 1617 IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n)); 1618 Args.push_back(*i); 1619 } 1620 } 1621 1622 CallSite NewCS; 1623 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) { 1624 NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(), 1625 Args); 1626 } else { 1627 NewCS = IRB.CreateCall(Func, Args); 1628 } 1629 NewCS.setCallingConv(CS.getCallingConv()); 1630 NewCS.setAttributes(CS.getAttributes().removeAttributes( 1631 *DFSF.DFS.Ctx, AttributeList::ReturnIndex, 1632 AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType()))); 1633 1634 if (Next) { 1635 ExtractValueInst *ExVal = 1636 ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next); 1637 
DFSF.SkipInsts.insert(ExVal); 1638 ExtractValueInst *ExShadow = 1639 ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next); 1640 DFSF.SkipInsts.insert(ExShadow); 1641 DFSF.setShadow(ExVal, ExShadow); 1642 DFSF.NonZeroChecks.push_back(ExShadow); 1643 1644 CS.getInstruction()->replaceAllUsesWith(ExVal); 1645 } 1646 1647 CS.getInstruction()->eraseFromParent(); 1648 } 1649 } 1650 1651 void DFSanVisitor::visitPHINode(PHINode &PN) { 1652 PHINode *ShadowPN = 1653 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN); 1654 1655 // Give the shadow phi node valid predecessors to fool SplitEdge into working. 1656 Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy); 1657 for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e; 1658 ++i) { 1659 ShadowPN->addIncoming(UndefShadow, *i); 1660 } 1661 1662 DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN)); 1663 DFSF.setShadow(&PN, ShadowPN); 1664 } 1665