//===-- DataFlowSanitizer.cpp - dynamic data flow analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
/// analysis.
///
/// Unlike other Sanitizer tools, this tool is not designed to detect a
/// specific class of bugs on its own.  Instead, it provides a generic dynamic
/// data flow analysis framework to be used by clients to help detect
/// application-specific issues within their own code.
///
/// The analysis is based on automatic propagation of data flow labels (also
/// known as taint labels) through a program as it performs computation.  Each
/// byte of application memory is backed by two bytes of shadow memory which
/// hold the label.  On Linux/x86_64, memory is laid out as follows:
///
/// +--------------------+ 0x800000000000 (top of memory)
/// | application memory |
/// +--------------------+ 0x700000008000 (kAppAddr)
/// |                    |
/// |       unused       |
/// |                    |
/// +--------------------+ 0x200200000000 (kUnusedAddr)
/// |    union table     |
/// +--------------------+ 0x200000000000 (kUnionTableAddr)
/// |   shadow memory    |
/// +--------------------+ 0x000000010000 (kShadowAddr)
/// | reserved by kernel |
/// +--------------------+ 0x000000000000
///
/// To derive a shadow memory address from an application memory address,
/// bits 44-46 are cleared to bring the address into the range
/// [0x000000008000,0x100000000000).  Then the address is shifted left by 1 to
/// account for the double byte representation of shadow labels and move the
/// address into the shadow memory range.  See the function
/// DataFlowSanitizer::getShadowAddress below.
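///
/// For illustration, with the x86_64 layout above the lowest application
/// address maps to the start of shadow memory:
///
///   0x700000008000 & ~0x700000000000 == 0x000000008000   (clear bits 44-46)
///   0x000000008000 * 2               == 0x000000010000   (== kShadowAddr)
///
/// and the highest application address 0x7fffffffffff maps to 0x1ffffffffffe,
/// just below the union table at kUnionTableAddr.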
///
/// For more information, please refer to the design document:
/// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <iterator>
#include <set>
#include <utility>

using namespace llvm;

// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs.  Instead of using a constant integer,
// the runtime will set the external mask based on the VMA range.
static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask";

// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct.  For example,
// if the input IR contains a load with alignment 8, this flag will cause
// the shadow load to have alignment 16.  This flag is disabled by default as
// we have unfortunately encountered too much code (including Clang itself;
// see PR14291) which performs misaligned access.
static cl::opt<bool> ClPreserveAlignment(
    "dfsan-preserve-alignment",
    cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
    cl::init(false));

// The ABI list files control how shadow parameters are passed.  The pass treats
// every function labelled "uninstrumented" in the ABI list file as conforming
// to the "native" (i.e. unsanitized) ABI.  Unless the ABI list contains
// additional annotations for those functions, a call to one of those functions
// will produce a warning message, as the labelling behaviour of the function
// is unknown.  The other supported annotations are "functional" and "discard",
// which are described below under DataFlowSanitizer::WrapperKind.
static cl::list<std::string> ClABIListFiles(
    "dfsan-abilist",
    cl::desc("File listing native ABI functions and how the pass treats them"),
    cl::Hidden);
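
// For illustration, a minimal ABI list (SpecialCaseList syntax) might contain
// entries such as:
//
//   fun:main=uninstrumented
//   fun:main=discard
//   fun:memcmp=uninstrumented
//   fun:memcmp=functional
//   fun:memcpy=uninstrumented
//   fun:memcpy=custom
//
// i.e. each uninstrumented function is additionally annotated with the
// WrapperKind the pass should use for calls to it.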

// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
// functions (see DataFlowSanitizer::InstrumentedABI below).
static cl::opt<bool> ClArgsABI(
    "dfsan-args-abi",
    cl::desc("Use the argument ABI rather than the TLS ABI"),
    cl::Hidden);

// Controls whether the pass includes or ignores the labels of pointers in load
// instructions.
static cl::opt<bool> ClCombinePointerLabelsOnLoad(
    "dfsan-combine-pointer-labels-on-load",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "loading from memory."),
    cl::Hidden, cl::init(true));

// Controls whether the pass includes or ignores the labels of pointers in
// store instructions.
static cl::opt<bool> ClCombinePointerLabelsOnStore(
    "dfsan-combine-pointer-labels-on-store",
    cl::desc("Combine the label of the pointer with the label of the data when "
             "storing in memory."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDebugNonzeroLabels(
    "dfsan-debug-nonzero-labels",
    cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
             "load or return with a nonzero label"),
    cl::Hidden);


namespace {

StringRef GetGlobalTypeString(const GlobalValue &G) {
  // Types of GlobalVariables are always pointer types.
  Type *GType = G.getValueType();
  // For now we support blacklisting struct types only.
  if (StructType *SGType = dyn_cast<StructType>(GType)) {
    if (!SGType->isLiteral())
      return SGType->getName();
  }
  return "<unknown type>";
}

class DFSanABIList {
  std::unique_ptr<SpecialCaseList> SCL;

public:
  DFSanABIList() {}

  void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }

  /// Returns whether either this function or its source file are listed in the
  /// given category.
  bool isIn(const Function &F, StringRef Category) const {
    return isIn(*F.getParent(), Category) ||
           SCL->inSection("fun", F.getName(), Category);
  }

  /// Returns whether this global alias is listed in the given category.
  ///
  /// If GA aliases a function, the alias's name is matched as a function name
  /// would be.  Similarly, aliases of globals are matched like globals.
  bool isIn(const GlobalAlias &GA, StringRef Category) const {
    if (isIn(*GA.getParent(), Category))
      return true;

    if (isa<FunctionType>(GA.getValueType()))
      return SCL->inSection("fun", GA.getName(), Category);

    return SCL->inSection("global", GA.getName(), Category) ||
           SCL->inSection("type", GetGlobalTypeString(GA), Category);
  }

  /// Returns whether this module is listed in the given category.
  bool isIn(const Module &M, StringRef Category) const {
    return SCL->inSection("src", M.getModuleIdentifier(), Category);
  }
};

class DataFlowSanitizer : public ModulePass {
  friend struct DFSanFunction;
  friend class DFSanVisitor;

  enum {
    ShadowWidth = 16
  };

  /// Which ABI should be used for instrumented functions?
  enum InstrumentedABI {
    /// Argument and return value labels are passed through additional
    /// arguments and by modifying the return type.
    IA_Args,

    /// Argument and return value labels are passed through TLS variables
    /// __dfsan_arg_tls and __dfsan_retval_tls.
    IA_TLS
  };

  /// How should calls to uninstrumented functions be handled?
  enum WrapperKind {
    /// This function is present in an uninstrumented form but we don't know
    /// how it should be handled.  Print a warning and call the function
    /// anyway.  Don't label the return value.
    WK_Warning,

    /// This function does not write to (user-accessible) memory, and its
    /// return value is unlabelled.
    WK_Discard,

    /// This function does not write to (user-accessible) memory, and the label
    /// of its return value is the union of the label of its arguments.
    WK_Functional,

    /// Instead of calling the function, a custom wrapper __dfsw_F is called,
    /// where F is the name of the function.  This function may wrap the
    /// original function or provide its own implementation.  This is similar
    /// to the IA_Args ABI, except that IA_Args uses a struct return type to
    /// pass the return value shadow in a register, while WK_Custom uses an
    /// extra pointer argument to return the shadow.  This allows the wrapped
    /// form of the function type to be expressed in C.
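    ///
    /// For illustration, a custom wrapper for "int f(int x)" would be declared
    /// in C roughly as
    ///
    ///   int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label);
    ///
    /// receiving a label for each argument and returning the result label via
    /// the extra pointer argument.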
    WK_Custom
  };

  Module *Mod;
  LLVMContext *Ctx;
  IntegerType *ShadowTy;
  PointerType *ShadowPtrTy;
  IntegerType *IntptrTy;
  ConstantInt *ZeroShadow;
  ConstantInt *ShadowPtrMask;
  ConstantInt *ShadowPtrMul;
  Constant *ArgTLS;
  Constant *RetvalTLS;
  void *(*GetArgTLSPtr)();
  void *(*GetRetvalTLSPtr)();
  Constant *GetArgTLS;
  Constant *GetRetvalTLS;
  Constant *ExternalShadowMask;
  FunctionType *DFSanUnionFnTy;
  FunctionType *DFSanUnionLoadFnTy;
  FunctionType *DFSanUnimplementedFnTy;
  FunctionType *DFSanSetLabelFnTy;
  FunctionType *DFSanNonzeroLabelFnTy;
  FunctionType *DFSanVarargWrapperFnTy;
  Constant *DFSanUnionFn;
  Constant *DFSanCheckedUnionFn;
  Constant *DFSanUnionLoadFn;
  Constant *DFSanUnimplementedFn;
  Constant *DFSanSetLabelFn;
  Constant *DFSanNonzeroLabelFn;
  Constant *DFSanVarargWrapperFn;
  MDNode *ColdCallWeights;
  DFSanABIList ABIList;
  DenseMap<Value *, Function *> UnwrappedFnMap;
  AttributeList ReadOnlyNoneAttrs;
  bool DFSanRuntimeShadowMask;

  Value *getShadowAddress(Value *Addr, Instruction *Pos);
  bool isInstrumented(const Function *F);
  bool isInstrumented(const GlobalAlias *GA);
  FunctionType *getArgsFunctionType(FunctionType *T);
  FunctionType *getTrampolineFunctionType(FunctionType *T);
  FunctionType *getCustomFunctionType(FunctionType *T);
  InstrumentedABI getInstrumentedABI();
  WrapperKind getWrapperKind(Function *F);
  void addGlobalNamePrefix(GlobalValue *GV);
  Function *buildWrapperFunction(Function *F, StringRef NewFName,
                                 GlobalValue::LinkageTypes NewFLink,
                                 FunctionType *NewFT);
  Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);

public:
  DataFlowSanitizer(
      const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
      void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
  static char ID;
  bool doInitialization(Module &M) override;
  bool runOnModule(Module &M) override;
};

struct DFSanFunction {
  DataFlowSanitizer &DFS;
  Function *F;
  DominatorTree DT;
  DataFlowSanitizer::InstrumentedABI IA;
  bool IsNativeABI;
  Value *ArgTLSPtr;
  Value *RetvalTLSPtr;
  AllocaInst *LabelReturnAlloca;
  DenseMap<Value *, Value *> ValShadowMap;
  DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
  std::vector<std::pair<PHINode *, PHINode *> > PHIFixups;
  DenseSet<Instruction *> SkipInsts;
  std::vector<Value *> NonZeroChecks;
  bool AvoidNewBlocks;

  struct CachedCombinedShadow {
    BasicBlock *Block;
    Value *Shadow;
  };
  DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
      CachedCombinedShadows;
  DenseMap<Value *, std::set<Value *>> ShadowElements;

  DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
      : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
        IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
        LabelReturnAlloca(nullptr) {
    DT.recalculate(*F);
    // FIXME: Need to track down the register allocator issue which causes poor
    // performance in pathological cases with large numbers of basic blocks.
    AvoidNewBlocks = F->size() > 1000;
  }
  Value *getArgTLSPtr();
  Value *getArgTLS(unsigned Index, Instruction *Pos);
  Value *getRetvalTLS();
  Value *getShadow(Value *V);
  void setShadow(Instruction *I, Value *Shadow);
  Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
  Value *combineOperandShadows(Instruction *Inst);
  Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
                    Instruction *Pos);
  void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
                   Instruction *Pos);
};

class DFSanVisitor : public InstVisitor<DFSanVisitor> {
public:
  DFSanFunction &DFSF;
  DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}

  const DataLayout &getDataLayout() const {
    return DFSF.F->getParent()->getDataLayout();
  }

  void visitOperandShadowInst(Instruction &I);

  void visitBinaryOperator(BinaryOperator &BO);
  void visitCastInst(CastInst &CI);
  void visitCmpInst(CmpInst &CI);
  void visitGetElementPtrInst(GetElementPtrInst &GEPI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitReturnInst(ReturnInst &RI);
  void visitCallSite(CallSite CS);
  void visitPHINode(PHINode &PN);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitShuffleVectorInst(ShuffleVectorInst &I);
  void visitExtractValueInst(ExtractValueInst &I);
  void visitInsertValueInst(InsertValueInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitSelectInst(SelectInst &I);
  void visitMemSetInst(MemSetInst &I);
  void visitMemTransferInst(MemTransferInst &I);
};

}

char DataFlowSanitizer::ID;
INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
                "DataFlowSanitizer: dynamic data flow analysis.", false, false)

ModulePass *
llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles,
                                  void *(*getArgTLS)(),
                                  void *(*getRetValTLS)()) {
  return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS);
}

DataFlowSanitizer::DataFlowSanitizer(
    const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(),
    void *(*getRetValTLS)())
    : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS),
      DFSanRuntimeShadowMask(false) {
  std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
  AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
                         ClABIListFiles.end());
  ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles));
}

FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
  llvm::SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
  ArgTypes.append(T->getNumParams(), ShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(ShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr);
  return FunctionType::get(RetType, ArgTypes, T->isVarArg());
}

FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
  assert(!T->isVarArg());
  llvm::SmallVector<Type *, 4> ArgTypes;
  ArgTypes.push_back(T->getPointerTo());
  ArgTypes.append(T->param_begin(), T->param_end());
  ArgTypes.append(T->getNumParams(), ShadowTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(ShadowPtrTy);
  return FunctionType::get(T->getReturnType(), ArgTypes, false);
}

FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
  llvm::SmallVector<Type *, 4> ArgTypes;
  for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end();
       i != e; ++i) {
    FunctionType *FT;
    if (isa<PointerType>(*i) && (FT = dyn_cast<FunctionType>(cast<PointerType>(
                                     *i)->getElementType()))) {
      ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
      ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
    } else {
      ArgTypes.push_back(*i);
    }
  }
  for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
    ArgTypes.push_back(ShadowTy);
  if (T->isVarArg())
    ArgTypes.push_back(ShadowPtrTy);
  Type *RetType = T->getReturnType();
  if (!RetType->isVoidTy())
    ArgTypes.push_back(ShadowPtrTy);
  return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg());
}

bool DataFlowSanitizer::doInitialization(Module &M) {
  llvm::Triple TargetTriple(M.getTargetTriple());
  bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
  bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
                  TargetTriple.getArch() == llvm::Triple::mips64el;
  bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64 ||
                   TargetTriple.getArch() == llvm::Triple::aarch64_be;

  const DataLayout &DL = M.getDataLayout();

  Mod = &M;
  Ctx = &M.getContext();
  ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
  ShadowPtrTy = PointerType::getUnqual(ShadowTy);
  IntptrTy = DL.getIntPtrType(*Ctx);
  ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
  if (IsX86_64)
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
  else if (IsMIPS64)
    ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
  // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
  else if (IsAArch64)
    DFSanRuntimeShadowMask = true;
  else
    report_fatal_error("unsupported triple");

  Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
  DFSanUnionFnTy =
      FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
  Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
  DFSanUnionLoadFnTy =
      FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
  DFSanUnimplementedFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
  Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
  DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
                                        DFSanSetLabelArgs, /*isVarArg=*/false);
  DFSanNonzeroLabelFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
  DFSanVarargWrapperFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);

  if (GetArgTLSPtr) {
    Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
    ArgTLS = nullptr;
    GetArgTLS = ConstantExpr::getIntToPtr(
        ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
        PointerType::getUnqual(
            FunctionType::get(PointerType::getUnqual(ArgTLSTy),
                              (Type *)nullptr)));
  }
  if (GetRetvalTLSPtr) {
    RetvalTLS = nullptr;
    GetRetvalTLS = ConstantExpr::getIntToPtr(
        ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
        PointerType::getUnqual(
            FunctionType::get(PointerType::getUnqual(ShadowTy),
                              (Type *)nullptr)));
  }

  ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
  return true;
}

bool DataFlowSanitizer::isInstrumented(const Function *F) {
  return !ABIList.isIn(*F, "uninstrumented");
}

bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
  return !ABIList.isIn(*GA, "uninstrumented");
}

DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
  return ClArgsABI ? IA_Args : IA_TLS;
}

DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
  if (ABIList.isIn(*F, "functional"))
    return WK_Functional;
  if (ABIList.isIn(*F, "discard"))
    return WK_Discard;
  if (ABIList.isIn(*F, "custom"))
    return WK_Custom;

  return WK_Warning;
}

void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
  std::string GVName = GV->getName(), Prefix = "dfs$";
  GV->setName(Prefix + GVName);

  // Try to change the name of the function in module inline asm.  We only do
  // this for specific asm directives, currently only ".symver", to try to
  // avoid corrupting asm which happens to contain the symbol name as a
  // substring.  Note that the substitution for .symver assumes that the
  // versioned symbol also has an instrumented name.
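  //
  // For example, a module asm directive such as ".symver f,f@@VERS_1" would be
  // rewritten here to ".symver dfs$f,dfs$f@@VERS_1" (illustrative symbol
  // names).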
  std::string Asm = GV->getParent()->getModuleInlineAsm();
  std::string SearchStr = ".symver " + GVName + ",";
  size_t Pos = Asm.find(SearchStr);
  if (Pos != std::string::npos) {
    Asm.replace(Pos, SearchStr.size(),
                ".symver " + Prefix + GVName + "," + Prefix);
    GV->getParent()->setModuleInlineAsm(Asm);
  }
}

Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
                                        GlobalValue::LinkageTypes NewFLink,
                                        FunctionType *NewFT) {
  FunctionType *FT = F->getFunctionType();
  Function *NewF = Function::Create(NewFT, NewFLink, NewFName,
                                    F->getParent());
  NewF->copyAttributesFrom(F);
  NewF->removeAttributes(
      AttributeList::ReturnIndex,
      AttributeList::get(
          F->getContext(), AttributeList::ReturnIndex,
          AttributeFuncs::typeIncompatible(NewFT->getReturnType())));

  BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
  if (F->isVarArg()) {
    NewF->removeAttributes(
        AttributeList::FunctionIndex,
        AttributeList().addAttribute(*Ctx, AttributeList::FunctionIndex,
                                     "split-stack"));
    CallInst::Create(DFSanVarargWrapperFn,
                     IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
                     BB);
    new UnreachableInst(*Ctx, BB);
  } else {
    std::vector<Value *> Args;
    unsigned n = FT->getNumParams();
    for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
      Args.push_back(&*ai);
    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}

Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
                                                          StringRef FName) {
  FunctionType *FTT = getTrampolineFunctionType(FT);
  Constant *C = Mod->getOrInsertFunction(FName, FTT);
  Function *F = dyn_cast<Function>(C);
  if (F && F->isDeclaration()) {
    F->setLinkage(GlobalValue::LinkOnceODRLinkage);
    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    std::vector<Value *> Args;
    Function::arg_iterator AI = F->arg_begin(); ++AI;
    for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
      Args.push_back(&*AI);
    CallInst *CI = CallInst::Create(&*F->arg_begin(), Args, "", BB);
    ReturnInst *RI;
    if (FT->getReturnType()->isVoidTy())
      RI = ReturnInst::Create(*Ctx, BB);
    else
      RI = ReturnInst::Create(*Ctx, CI, BB);

    DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
    for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
      DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
    DFSanVisitor(DFSF).visitCallInst(*CI);
    if (!FT->getReturnType()->isVoidTy())
      new StoreInst(DFSF.getShadow(RI->getReturnValue()),
                    &*std::prev(F->arg_end()), RI);
  }

  return C;
}

bool DataFlowSanitizer::runOnModule(Module &M) {
  if (ABIList.isIn(M, "skip"))
    return false;

  if (!GetArgTLSPtr) {
    Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
    ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
    if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS))
      G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }
  if (!GetRetvalTLSPtr) {
    RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
    if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS))
      G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
  }

  ExternalShadowMask =
      Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);

  DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
  if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
    F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
    F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
    F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    F->addAttribute(1, Attribute::ZExt);
    F->addAttribute(2, Attribute::ZExt);
  }
  DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
  if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
    F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
    F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
    F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    F->addAttribute(1, Attribute::ZExt);
    F->addAttribute(2, Attribute::ZExt);
  }
  DFSanUnionLoadFn =
      Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
  if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
    F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
    F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
    F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  DFSanSetLabelFn =
      Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy);
  if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) {
    F->addAttribute(1, Attribute::ZExt);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);

  std::vector<Function *> FnsToInstrument;
  llvm::SmallPtrSet<Function *, 2> FnsWithNativeABI;
  for (Function &i : M) {
    if (!i.isIntrinsic() &&
        &i != DFSanUnionFn &&
        &i != DFSanCheckedUnionFn &&
        &i != DFSanUnionLoadFn &&
        &i != DFSanUnimplementedFn &&
        &i != DFSanSetLabelFn &&
        &i != DFSanNonzeroLabelFn &&
        &i != DFSanVarargWrapperFn)
      FnsToInstrument.push_back(&i);
  }

  // Give function aliases prefixes when necessary, and build wrappers where
  // the instrumentedness is inconsistent.
  for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
    GlobalAlias *GA = &*i;
    ++i;
    // Don't stop on weak.  We assume people aren't playing games with the
    // instrumentedness of overridden weak aliases.
    if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
      bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
      if (GAInst && FInst) {
        addGlobalNamePrefix(GA);
      } else if (GAInst != FInst) {
        // Non-instrumented alias of an instrumented function, or vice versa.
        // Replace the alias with a native-ABI wrapper of the aliasee.  The
        // pass below will take care of instrumenting it.
        Function *NewF =
            buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
        GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
        NewF->takeName(GA);
        GA->eraseFromParent();
        FnsToInstrument.push_back(NewF);
      }
    }
  }

  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
  ReadOnlyNoneAttrs = AttributeList::get(*Ctx, AttributeList::FunctionIndex, B);

  // First, change the ABI of every function in the module.  ABI-listed
  // functions keep their original ABI and get a wrapper function.
  for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
                                         e = FnsToInstrument.end();
       i != e; ++i) {
    Function &F = **i;
    FunctionType *FT = F.getFunctionType();

    bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
                              FT->getReturnType()->isVoidTy());

    if (isInstrumented(&F)) {
      // Instrumented functions get a 'dfs$' prefix.  This allows us to more
      // easily identify cases of mismatching ABIs.
      if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
        FunctionType *NewFT = getArgsFunctionType(FT);
        Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
        NewF->copyAttributesFrom(&F);
        NewF->removeAttributes(
            AttributeList::ReturnIndex,
            AttributeList::get(
                NewF->getContext(), AttributeList::ReturnIndex,
                AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
        for (Function::arg_iterator FArg = F.arg_begin(),
                                    NewFArg = NewF->arg_begin(),
                                    FArgEnd = F.arg_end();
             FArg != FArgEnd; ++FArg, ++NewFArg) {
          FArg->replaceAllUsesWith(&*NewFArg);
        }
        NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());

        for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
             UI != UE;) {
          BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
          ++UI;
          if (BA) {
            BA->replaceAllUsesWith(
                BlockAddress::get(NewF, BA->getBasicBlock()));
            delete BA;
          }
        }
        F.replaceAllUsesWith(
            ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
        NewF->takeName(&F);
        F.eraseFromParent();
        *i = NewF;
        addGlobalNamePrefix(NewF);
      } else {
        addGlobalNamePrefix(&F);
      }
    } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
      // Build a wrapper function for F.  The wrapper simply calls F, and is
      // added to FnsToInstrument so that any instrumentation according to its
      // WrapperKind is done in the second pass below.
      FunctionType *NewFT = getInstrumentedABI() == IA_Args
                                ? getArgsFunctionType(FT)
                                : FT;
      Function *NewF = buildWrapperFunction(
          &F, std::string("dfsw$") + std::string(F.getName()),
          GlobalValue::LinkOnceODRLinkage, NewFT);
      if (getInstrumentedABI() == IA_TLS)
        NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);

      Value *WrappedFnCst =
          ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
      F.replaceAllUsesWith(WrappedFnCst);

      UnwrappedFnMap[WrappedFnCst] = &F;
      *i = NewF;

      if (!F.isDeclaration()) {
        // This function is probably defining an interposition of an
        // uninstrumented function and hence needs to keep the original ABI.
        // But any functions it may call need to use the instrumented ABI, so
        // we instrument it in a mode which preserves the original ABI.
        FnsWithNativeABI.insert(&F);

        // This code needs to rebuild the iterators, as they may be invalidated
        // by the push_back, taking care that the new range does not include
        // any functions added by this code.
        size_t N = i - FnsToInstrument.begin(),
               Count = e - FnsToInstrument.begin();
        FnsToInstrument.push_back(&F);
        i = FnsToInstrument.begin() + N;
        e = FnsToInstrument.begin() + Count;
      }
      // Hopefully, nobody will try to indirectly call a vararg
      // function... yet.
    } else if (FT->isVarArg()) {
      UnwrappedFnMap[&F] = &F;
      *i = nullptr;
    }
  }

  for (Function *i : FnsToInstrument) {
    if (!i || i->isDeclaration())
      continue;

    removeUnreachableBlocks(*i);

    DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));

    // DFSanVisitor may create new basic blocks, which confuses df_iterator.
    // Build a copy of the list before iterating over it.
    llvm::SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));

    for (BasicBlock *i : BBList) {
      Instruction *Inst = &i->front();
      while (1) {
        // DFSanVisitor may split the current basic block, changing the current
        // instruction's next pointer and moving the next instruction to the
        // tail block from which we should continue.
        Instruction *Next = Inst->getNextNode();
        // DFSanVisitor may delete Inst, so keep track of whether it was a
        // terminator.
        bool IsTerminator = isa<TerminatorInst>(Inst);
        if (!DFSF.SkipInsts.count(Inst))
          DFSanVisitor(DFSF).visit(Inst);
        if (IsTerminator)
          break;
        Inst = Next;
      }
    }

    // We will not necessarily be able to compute the shadow for every phi node
    // until we have visited every block.  Therefore, the code that handles phi
    // nodes adds them to the PHIFixups list so that they can be properly
    // handled here.
    for (std::vector<std::pair<PHINode *, PHINode *> >::iterator
             i = DFSF.PHIFixups.begin(),
             e = DFSF.PHIFixups.end();
         i != e; ++i) {
      for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
           ++val) {
        i->second->setIncomingValue(
            val, DFSF.getShadow(i->first->getIncomingValue(val)));
      }
    }

    // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
    // places (i.e. instructions in basic blocks we haven't even begun visiting
    // yet).  To make our life easier, do this work in a pass after the main
    // instrumentation.
    if (ClDebugNonzeroLabels) {
      for (Value *V : DFSF.NonZeroChecks) {
        Instruction *Pos;
        if (Instruction *I = dyn_cast<Instruction>(V))
          Pos = I->getNextNode();
        else
          Pos = &DFSF.F->getEntryBlock().front();
        while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
          Pos = Pos->getNextNode();
        IRBuilder<> IRB(Pos);
        Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
        BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
            Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
        IRBuilder<> ThenIRB(BI);
        ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
      }
    }
  }

  return false;
}

Value *DFSanFunction::getArgTLSPtr() {
  if (ArgTLSPtr)
    return ArgTLSPtr;
  if (DFS.ArgTLS)
    return ArgTLSPtr = DFS.ArgTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
}

Value *DFSanFunction::getRetvalTLS() {
  if (RetvalTLSPtr)
    return RetvalTLSPtr;
  if (DFS.RetvalTLS)
    return RetvalTLSPtr = DFS.RetvalTLS;

  IRBuilder<> IRB(&F->getEntryBlock().front());
  return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
}

Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
  IRBuilder<> IRB(Pos);
  return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx);
}

Value *DFSanFunction::getShadow(Value *V) {
  if (!isa<Argument>(V) && !isa<Instruction>(V))
    return DFS.ZeroShadow;
  Value *&Shadow = ValShadowMap[V];
  if (!Shadow) {
    if (Argument *A = dyn_cast<Argument>(V)) {
      if (IsNativeABI)
        return DFS.ZeroShadow;
      switch (IA) {
      case DataFlowSanitizer::IA_TLS: {
        Value *ArgTLSPtr = getArgTLSPtr();
        Instruction *ArgTLSPos =
            DFS.ArgTLS ? &*F->getEntryBlock().begin()
                       : cast<Instruction>(ArgTLSPtr)->getNextNode();
        IRBuilder<> IRB(ArgTLSPos);
        Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
        break;
      }
      case DataFlowSanitizer::IA_Args: {
        unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
        Function::arg_iterator i = F->arg_begin();
        while (ArgIdx--)
          ++i;
        Shadow = &*i;
        assert(Shadow->getType() == DFS.ShadowTy);
        break;
      }
      }
      NonZeroChecks.push_back(Shadow);
    } else {
      Shadow = DFS.ZeroShadow;
    }
  }
  return Shadow;
}

void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
  assert(!ValShadowMap.count(I));
  assert(Shadow->getType() == DFS.ShadowTy);
  ValShadowMap[I] = Shadow;
}

Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
  assert(Addr != RetvalTLS && "Reinstrumenting?");
  IRBuilder<> IRB(Pos);
  Value *ShadowPtrMaskValue;
  if (DFSanRuntimeShadowMask)
    ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
  else
    ShadowPtrMaskValue = ShadowPtrMask;
  return IRB.CreateIntToPtr(
      IRB.CreateMul(
          IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
                        IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
          ShadowPtrMul),
      ShadowPtrTy);
}

// Generates IR to compute the union of the two given shadows, inserting it
// before Pos.  Returns the computed union Value.
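//
// Conceptually, union(0, L) == L, union(L, L) == L, and for two distinct
// nonzero labels the runtime's __dfsan_union returns a (possibly fresh) label
// representing the union of the operands' label sets; the code below simply
// short-circuits the cases it can resolve statically.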
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
  if (V1 == DFS.ZeroShadow)
    return V2;
  if (V2 == DFS.ZeroShadow)
    return V1;
  if (V1 == V2)
    return V1;

  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return V1;
    } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                             V1Elems->second.begin(), V1Elems->second.end())) {
      return V2;
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return V1;
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return V2;
  }

  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  IRBuilder<> IRB(Pos);
  if (AvoidNewBlocks) {
    CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addAttribute(1, Attribute::ZExt);
    Call->addAttribute(2, Attribute::ZExt);

    CCS.Block = Pos->getParent();
    CCS.Shadow = Call;
  } else {
    BasicBlock *Head = Pos->getParent();
    Value *Ne = IRB.CreateICmpNE(V1, V2);
    BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
        Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
    IRBuilder<> ThenIRB(BI);
    CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
    Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
    Call->addAttribute(1, Attribute::ZExt);
    Call->addAttribute(2, Attribute::ZExt);

    BasicBlock *Tail = BI->getSuccessor(0);
    PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Phi->addIncoming(Call, Call->getParent());
    Phi->addIncoming(V1, Head);

    CCS.Block = Tail;
    CCS.Shadow = Phi;
  }

  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}

// A convenience function which folds the shadows of each of the operands
// of the provided instruction Inst, inserting the IR before Inst.  Returns
// the computed union Value.
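//
// For example, for "%sum = add i32 %a, %b" this returns
// combineShadows(getShadow(%a), getShadow(%b)), with the union IR inserted
// before the add.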
Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
  if (Inst->getNumOperands() == 0)
    return DFS.ZeroShadow;

  Value *Shadow = getShadow(Inst->getOperand(0));
  for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
    Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
  }
  return Shadow;
}

void DFSanVisitor::visitOperandShadowInst(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
}

// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
// Addr has alignment Align, and take the union of each of those shadows.
Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
                                 Instruction *Pos) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
        AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      return IRB.CreateLoad(i->second);
    }
  }

  uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
  SmallVector<Value *, 2> Objs;
  GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
  bool AllConstants = true;
  for (Value *Obj : Objs) {
    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
      continue;
    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
      continue;

    AllConstants = false;
    break;
  }
  if (AllConstants)
    return DFS.ZeroShadow;

  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  switch (Size) {
  case 0:
    return DFS.ZeroShadow;
  case 1: {
    LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
    LI->setAlignment(ShadowAlign);
    return LI;
  }
  case 2: {
    IRBuilder<> IRB(Pos);
    Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
                                       ConstantInt::get(DFS.IntptrTy, 1));
    return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
                          IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
  }
  }
  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
    // Fast path for the common case where each byte has identical shadow: load
    // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
    // shadow is non-equal.
    BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
    IRBuilder<> FallbackIRB(FallbackBB);
    CallInst *FallbackCall = FallbackIRB.CreateCall(
        DFS.DFSanUnionLoadFn,
        {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
    FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);

    // Compare each of the shadows stored in the loaded 64 bits to each other,
    // by computing (WideShadow rotl ShadowWidth) == WideShadow.
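    //
    // For illustration, with 16-bit shadows a 64-bit word holds four labels
    // [s3|s2|s1|s0]; rotating left by 16 yields [s2|s1|s0|s3], and the two are
    // equal if and only if s0 == s1 == s2 == s3.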
    IRBuilder<> IRB(Pos);
    Value *WideAddr =
        IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
    Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
    Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
    Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
    Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
    Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
    Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);

    BasicBlock *Head = Pos->getParent();
    BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());

    if (DomTreeNode *OldNode = DT.getNode(Head)) {
      std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());

      DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
      for (auto Child : Children)
        DT.changeImmediateDominator(Child, NewNode);
    }

    // In the following code LastBr will refer to the previous basic block's
    // conditional branch instruction, whose true successor is fixed up to
    // point to the next block during the loop below or to the tail after the
    // final iteration.
    BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
    ReplaceInstWithInst(Head->getTerminator(), LastBr);
    DT.addNewBlock(FallbackBB, Head);

    for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
         Ofs += 64 / DFS.ShadowWidth) {
      BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
      DT.addNewBlock(NextBB, LastBr->getParent());
      IRBuilder<> NextIRB(NextBB);
      WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
                                   ConstantInt::get(DFS.IntptrTy, 1));
      Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
      ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
      LastBr->setSuccessor(0, NextBB);
      LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
    }

    LastBr->setSuccessor(0, Tail);
    FallbackIRB.CreateBr(Tail);
    PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
    Shadow->addIncoming(FallbackCall, FallbackBB);
    Shadow->addIncoming(TruncShadow, LastBr->getParent());
    return Shadow;
  }

  IRBuilder<> IRB(Pos);
  CallInst *FallbackCall = IRB.CreateCall(
      DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
  return FallbackCall;
}

void DFSanVisitor::visitLoadInst(LoadInst &LI) {
  auto &DL = LI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(LI.getType());
  if (Size == 0) {
    DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
    return;
  }

  uint64_t Align;
  if (ClPreserveAlignment) {
    Align = LI.getAlignment();
    if (Align == 0)
      Align = DL.getABITypeAlignment(LI.getType());
  } else {
    Align = 1;
  }
  IRBuilder<> IRB(&LI);
  Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
  if (ClCombinePointerLabelsOnLoad) {
    Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
  }
  if (Shadow != DFSF.DFS.ZeroShadow)
    DFSF.NonZeroChecks.push_back(Shadow);

  DFSF.setShadow(&LI, Shadow);
}

void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
                                Value *Shadow, Instruction *Pos) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
        AllocaShadowMap.find(AI);
    if (i != AllocaShadowMap.end()) {
      IRBuilder<> IRB(Pos);
      IRB.CreateStore(Shadow, i->second);
      return;
    }
  }

  uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
  IRBuilder<> IRB(Pos);
  Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
  if (Shadow == DFS.ZeroShadow) {
    IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
    Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
    Value *ExtShadowAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
    IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
    return;
  }

  const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
  uint64_t Offset = 0;
  if (Size >= ShadowVecSize) {
    VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
    Value *ShadowVec = UndefValue::get(ShadowVecTy);
    for (unsigned i = 0; i != ShadowVecSize; ++i) {
      ShadowVec = IRB.CreateInsertElement(
          ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
    }
    Value *ShadowVecAddr =
        IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
    do {
      Value *CurShadowVecAddr =
          IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
      IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
      Size -= ShadowVecSize;
      ++Offset;
    } while (Size >= ShadowVecSize);
    Offset *= ShadowVecSize;
  }
  while (Size > 0) {
    Value *CurShadowAddr =
        IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
    IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
    --Size;
    ++Offset;
  }
}

void DFSanVisitor::visitStoreInst(StoreInst &SI) {
  auto &DL = SI.getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
  if (Size == 0)
    return;

  uint64_t Align;
  if (ClPreserveAlignment) {
    Align = SI.getAlignment();
    if (Align == 0)
      Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
  } else {
    Align = 1;
  }

  Value *Shadow = DFSF.getShadow(SI.getValueOperand());
  if (ClCombinePointerLabelsOnStore) {
    Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
    Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
  }
  DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
}

void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
  visitOperandShadowInst(BO);
}

void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }

void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); }

void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  visitOperandShadowInst(GEPI);
}

void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
  visitOperandShadowInst(I);
}

void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
  bool AllLoadsStores = true;
  for (User *U : I.users()) {
    if (isa<LoadInst>(U))
      continue;

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getPointerOperand() == &I)
        continue;
    }

    AllLoadsStores = false;
    break;
  }
  if (AllLoadsStores) {
    IRBuilder<> IRB(&I);
    DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
  }
  DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
}

void DFSanVisitor::visitSelectInst(SelectInst &I) {
  Value *CondShadow = DFSF.getShadow(I.getCondition());
  Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
  Value *FalseShadow = DFSF.getShadow(I.getFalseValue());

  if (isa<VectorType>(I.getCondition()->getType())) {
    DFSF.setShadow(
        &I,
        DFSF.combineShadows(
            CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
  } else {
    Value *ShadowSel;
    if (TrueShadow == FalseShadow) {
      ShadowSel = TrueShadow;
    } else {
      ShadowSel =
          SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
    }
    DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
  }
}

void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
  IRBuilder<> IRB(&I);
  Value *ValShadow = DFSF.getShadow(I.getValue());
  IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
                 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
                                                                *DFSF.DFS.Ctx)),
                  IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}

void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);
  Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  Value *LenShadow = IRB.CreateMul(
      I.getLength(),
      ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
  Value *AlignShadow;
  if (ClPreserveAlignment) {
    AlignShadow = IRB.CreateMul(I.getAlignmentCst(),
                                ConstantInt::get(I.getAlignmentCst()->getType(),
                                                 DFSF.DFS.ShadowWidth / 8));
  } else {
    AlignShadow = ConstantInt::get(I.getAlignmentCst()->getType(),
                                   DFSF.DFS.ShadowWidth / 8);
  }
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow,
                                      AlignShadow, I.getVolatileCst()});
}

void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      IRB.CreateStore(S, DFSF.getRetvalTLS());
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}

void DFSanVisitor::visitCallSite(CallSite CS) {
  Function *F = CS.getCalledFunction();
  if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) {
    visitOperandShadowInst(*CS.getInstruction());
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn)
    return;

  IRBuilder<> IRB(CS.getInstruction());

  DenseMap<Value *, Function *>::iterator i =
      DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue());
  if (i != DFSF.DFS.UnwrappedFnMap.end()) {
    Function *F = i->second;
    switch (DFSF.DFS.getWrapperKind(F)) {
    case DataFlowSanitizer::WK_Warning: {
      CS.setCalledFunction(F);
      IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                     IRB.CreateGlobalStringPtr(F->getName()));
      DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
      return;
    }
    case DataFlowSanitizer::WK_Discard: {
      CS.setCalledFunction(F);
      DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
      return;
    }
    case DataFlowSanitizer::WK_Functional: {
      CS.setCalledFunction(F);
      visitOperandShadowInst(*CS.getInstruction());
      return;
    }
    case DataFlowSanitizer::WK_Custom: {
      // Don't try to handle invokes of custom functions, it's too complicated.
      // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
      // wrapper.
      if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
        FunctionType *FT = F->getFunctionType();
        FunctionType *CustomFT = DFSF.DFS.getCustomFunctionType(FT);
        std::string CustomFName = "__dfsw_";
        CustomFName += F->getName();
        Constant *CustomF =
            DFSF.DFS.Mod->getOrInsertFunction(CustomFName, CustomFT);
        if (Function *CustomFn = dyn_cast<Function>(CustomF)) {
          CustomFn->copyAttributesFrom(F);

          // Custom functions returning non-void will write to the return
          // label.
          if (!FT->getReturnType()->isVoidTy()) {
            CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                       DFSF.DFS.ReadOnlyNoneAttrs);
          }
        }

        std::vector<Value *> Args;

        CallSite::arg_iterator i = CS.arg_begin();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
          Type *T = (*i)->getType();
          FunctionType *ParamFT;
          if (isa<PointerType>(T) &&
              (ParamFT = dyn_cast<FunctionType>(
                   cast<PointerType>(T)->getElementType()))) {
            std::string TName = "dfst";
            TName += utostr(FT->getNumParams() - n);
            TName += "$";
            TName += F->getName();
            Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
            Args.push_back(T);
            Args.push_back(
                IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
          } else {
            Args.push_back(*i);
          }
        }

        i = CS.arg_begin();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
          Args.push_back(DFSF.getShadow(*i));

        if (FT->isVarArg()) {
          auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
                                           CS.arg_size() - FT->getNumParams());
          auto *LabelVAAlloca = new AllocaInst(
              LabelVATy, getDataLayout().getAllocaAddrSpace(),
              "labelva", &DFSF.F->getEntryBlock().front());

          for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
            auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
            IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
          }

          Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
        }

        if (!FT->getReturnType()->isVoidTy()) {
          if (!DFSF.LabelReturnAlloca) {
            DFSF.LabelReturnAlloca =
                new AllocaInst(DFSF.DFS.ShadowTy,
                               getDataLayout().getAllocaAddrSpace(),
                               "labelreturn", &DFSF.F->getEntryBlock().front());
          }
          Args.push_back(DFSF.LabelReturnAlloca);
        }

        for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
          Args.push_back(*i);

        CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
        CustomCI->setCallingConv(CI->getCallingConv());
        CustomCI->setAttributes(CI->getAttributes());

        if (!FT->getReturnType()->isVoidTy()) {
          LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
          DFSF.setShadow(CustomCI, LabelLoad);
        }

        CI->replaceAllUsesWith(CustomCI);
        CI->eraseFromParent();
        return;
      }
      break;
    }
    }
  }

  FunctionType *FT = cast<FunctionType>(
      CS.getCalledValue()->getType()->getPointerElementType());
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
      IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)),
                      DFSF.getArgTLS(i, CS.getInstruction()));
    }
  }

  Instruction *Next = nullptr;
  if (!CS.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CS->getIterator() != CS->getParent()->end());
      Next = CS->getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      IRBuilder<> NextIRB(Next);
      LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
      DFSF.SkipInsts.insert(LI);
      DFSF.setShadow(CS.getInstruction(), LI);
      DFSF.NonZeroChecks.push_back(LI);
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
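  //
  // For illustration, under IA_Args a call to "i32 @f(i32 %x)" becomes a call
  // to "{ i32, i16 } @dfs$f(i32 %x, i16 %x.shadow)", with the value and its
  // shadow then extracted from the returned struct below.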
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT));
    std::vector<Value *> Args;

    CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(*i);

    i = CS.arg_begin();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(DFSF.getShadow(*i));

    if (FT->isVarArg()) {
      unsigned VarArgSize = CS.arg_size() - FT->getNumParams();
      ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
      for (unsigned n = 0; i != e; ++i, ++n) {
        IRB.CreateStore(
            DFSF.getShadow(*i),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
        Args.push_back(*i);
      }
    }

    CallSite NewCS;
    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(),
                               Args);
    } else {
      NewCS = IRB.CreateCall(Func, Args);
    }
    NewCS.setCallingConv(CS.getCallingConv());
    NewCS.setAttributes(CS.getAttributes().removeAttributes(
        *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
        AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));

    if (Next) {
      ExtractValueInst *ExVal =
          ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next);
      DFSF.SkipInsts.insert(ExVal);
      ExtractValueInst *ExShadow =
          ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
      DFSF.SkipInsts.insert(ExShadow);
      DFSF.setShadow(ExVal, ExShadow);
      DFSF.NonZeroChecks.push_back(ExShadow);

      CS.getInstruction()->replaceAllUsesWith(ExVal);
    }

    CS.getInstruction()->eraseFromParent();
  }
}

void DFSanVisitor::visitPHINode(PHINode &PN) {
  PHINode *ShadowPN =
      PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);

  // Give the shadow phi node valid predecessors to fool SplitEdge into
  // working.
  Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
  for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
       ++i) {
    ShadowPN->addIncoming(UndefShadow, *i);
  }

  DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
  DFSF.setShadow(&PN, ShadowPN);
}