1 //===-- DataFlowSanitizer.cpp - dynamic data flow analysis ----------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 /// \file 10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow 11 /// analysis. 12 /// 13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific 14 /// class of bugs on its own. Instead, it provides a generic dynamic data flow 15 /// analysis framework to be used by clients to help detect application-specific 16 /// issues within their own code. 17 /// 18 /// The analysis is based on automatic propagation of data flow labels (also 19 /// known as taint labels) through a program as it performs computation. Each 20 /// byte of application memory is backed by two bytes of shadow memory which 21 /// hold the label. On Linux/x86_64, memory is laid out as follows: 22 /// 23 /// +--------------------+ 0x800000000000 (top of memory) 24 /// | application memory | 25 /// +--------------------+ 0x700000008000 (kAppAddr) 26 /// | | 27 /// | unused | 28 /// | | 29 /// +--------------------+ 0x200200000000 (kUnusedAddr) 30 /// | union table | 31 /// +--------------------+ 0x200000000000 (kUnionTableAddr) 32 /// | shadow memory | 33 /// +--------------------+ 0x000000010000 (kShadowAddr) 34 /// | reserved by kernel | 35 /// +--------------------+ 0x000000000000 36 /// 37 /// To derive a shadow memory address from an application memory address, 38 /// bits 44-46 are cleared to bring the address into the range 39 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to 40 /// account for the double byte representation of shadow labels and move the 41 /// address into the shadow memory range. See the function 42 /// DataFlowSanitizer::getShadowAddress below. 43 /// 44 /// For more information, please refer to the design document: 45 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html 46 47 #include "llvm/Transforms/Instrumentation.h" 48 #include "llvm/ADT/DenseMap.h" 49 #include "llvm/ADT/DenseSet.h" 50 #include "llvm/ADT/DepthFirstIterator.h" 51 #include "llvm/ADT/StringExtras.h" 52 #include "llvm/ADT/Triple.h" 53 #include "llvm/Analysis/ValueTracking.h" 54 #include "llvm/IR/Dominators.h" 55 #include "llvm/IR/DebugInfo.h" 56 #include "llvm/IR/IRBuilder.h" 57 #include "llvm/IR/InlineAsm.h" 58 #include "llvm/IR/InstVisitor.h" 59 #include "llvm/IR/LLVMContext.h" 60 #include "llvm/IR/MDBuilder.h" 61 #include "llvm/IR/Type.h" 62 #include "llvm/IR/Value.h" 63 #include "llvm/Pass.h" 64 #include "llvm/Support/CommandLine.h" 65 #include "llvm/Support/SpecialCaseList.h" 66 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 67 #include "llvm/Transforms/Utils/Local.h" 68 #include <algorithm> 69 #include <iterator> 70 #include <set> 71 #include <utility> 72 73 using namespace llvm; 74 75 // External symbol to be used when generating the shadow address for 76 // architectures with multiple VMAs. Instead of using a constant integer 77 // the runtime will set the external mask based on the VMA range. 78 static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask"; 79 80 // The -dfsan-preserve-alignment flag controls whether this pass assumes that 81 // alignment requirements provided by the input IR are correct. 
For example, 82 // if the input IR contains a load with alignment 8, this flag will cause 83 // the shadow load to have alignment 16. This flag is disabled by default as 84 // we have unfortunately encountered too much code (including Clang itself; 85 // see PR14291) which performs misaligned access. 86 static cl::opt<bool> ClPreserveAlignment( 87 "dfsan-preserve-alignment", 88 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden, 89 cl::init(false)); 90 91 // The ABI list files control how shadow parameters are passed. The pass treats 92 // every function labelled "uninstrumented" in the ABI list file as conforming 93 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains 94 // additional annotations for those functions, a call to one of those functions 95 // will produce a warning message, as the labelling behaviour of the function is 96 // unknown. The other supported annotations are "functional" and "discard", 97 // which are described below under DataFlowSanitizer::WrapperKind. 98 static cl::list<std::string> ClABIListFiles( 99 "dfsan-abilist", 100 cl::desc("File listing native ABI functions and how the pass treats them"), 101 cl::Hidden); 102 103 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented 104 // functions (see DataFlowSanitizer::InstrumentedABI below). 105 static cl::opt<bool> ClArgsABI( 106 "dfsan-args-abi", 107 cl::desc("Use the argument ABI rather than the TLS ABI"), 108 cl::Hidden); 109 110 // Controls whether the pass includes or ignores the labels of pointers in load 111 // instructions. 112 static cl::opt<bool> ClCombinePointerLabelsOnLoad( 113 "dfsan-combine-pointer-labels-on-load", 114 cl::desc("Combine the label of the pointer with the label of the data when " 115 "loading from memory."), 116 cl::Hidden, cl::init(true)); 117 118 // Controls whether the pass includes or ignores the labels of pointers in 119 // stores instructions. 120 static cl::opt<bool> ClCombinePointerLabelsOnStore( 121 "dfsan-combine-pointer-labels-on-store", 122 cl::desc("Combine the label of the pointer with the label of the data when " 123 "storing in memory."), 124 cl::Hidden, cl::init(false)); 125 126 static cl::opt<bool> ClDebugNonzeroLabels( 127 "dfsan-debug-nonzero-labels", 128 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, " 129 "load or return with a nonzero label"), 130 cl::Hidden); 131 132 133 namespace { 134 135 StringRef GetGlobalTypeString(const GlobalValue &G) { 136 // Types of GlobalVariables are always pointer types. 137 Type *GType = G.getValueType(); 138 // For now we support blacklisting struct types only. 139 if (StructType *SGType = dyn_cast<StructType>(GType)) { 140 if (!SGType->isLiteral()) 141 return SGType->getName(); 142 } 143 return "<unknown type>"; 144 } 145 146 class DFSanABIList { 147 std::unique_ptr<SpecialCaseList> SCL; 148 149 public: 150 DFSanABIList() {} 151 152 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); } 153 154 /// Returns whether either this function or its source file are listed in the 155 /// given category. 156 bool isIn(const Function &F, StringRef Category) const { 157 return isIn(*F.getParent(), Category) || 158 SCL->inSection("fun", F.getName(), Category); 159 } 160 161 /// Returns whether this global alias is listed in the given category. 162 /// 163 /// If GA aliases a function, the alias's name is matched as a function name 164 /// would be. Similarly, aliases of globals are matched like globals. 
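  ///
  /// For illustration only (hypothetical entries in the SpecialCaseList
  /// syntax consumed by -dfsan-abilist): an alias to a function would be
  /// matched by an entry such as "fun:my_alias=uninstrumented", while an
  /// alias to a global would be matched by "global:my_global=uninstrumented"
  /// or, via its struct type, "type:my_struct=uninstrumented".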
165 bool isIn(const GlobalAlias &GA, StringRef Category) const { 166 if (isIn(*GA.getParent(), Category)) 167 return true; 168 169 if (isa<FunctionType>(GA.getValueType())) 170 return SCL->inSection("fun", GA.getName(), Category); 171 172 return SCL->inSection("global", GA.getName(), Category) || 173 SCL->inSection("type", GetGlobalTypeString(GA), Category); 174 } 175 176 /// Returns whether this module is listed in the given category. 177 bool isIn(const Module &M, StringRef Category) const { 178 return SCL->inSection("src", M.getModuleIdentifier(), Category); 179 } 180 }; 181 182 class DataFlowSanitizer : public ModulePass { 183 friend struct DFSanFunction; 184 friend class DFSanVisitor; 185 186 enum { 187 ShadowWidth = 16 188 }; 189 190 /// Which ABI should be used for instrumented functions? 191 enum InstrumentedABI { 192 /// Argument and return value labels are passed through additional 193 /// arguments and by modifying the return type. 194 IA_Args, 195 196 /// Argument and return value labels are passed through TLS variables 197 /// __dfsan_arg_tls and __dfsan_retval_tls. 198 IA_TLS 199 }; 200 201 /// How should calls to uninstrumented functions be handled? 202 enum WrapperKind { 203 /// This function is present in an uninstrumented form but we don't know 204 /// how it should be handled. Print a warning and call the function anyway. 205 /// Don't label the return value. 206 WK_Warning, 207 208 /// This function does not write to (user-accessible) memory, and its return 209 /// value is unlabelled. 210 WK_Discard, 211 212 /// This function does not write to (user-accessible) memory, and the label 213 /// of its return value is the union of the label of its arguments. 214 WK_Functional, 215 216 /// Instead of calling the function, a custom wrapper __dfsw_F is called, 217 /// where F is the name of the function. This function may wrap the 218 /// original function or provide its own implementation. This is similar to 219 /// the IA_Args ABI, except that IA_Args uses a struct return type to 220 /// pass the return value shadow in a register, while WK_Custom uses an 221 /// extra pointer argument to return the shadow. This allows the wrapped 222 /// form of the function type to be expressed in C. 
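    ///
    /// For example (illustrative signature only), a function declared in C as
    ///   int f(int x);
    /// would have a custom wrapper declared roughly as
    ///   int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label);
    /// where dfsan_label is the 16-bit shadow type.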
223 WK_Custom 224 }; 225 226 Module *Mod; 227 LLVMContext *Ctx; 228 IntegerType *ShadowTy; 229 PointerType *ShadowPtrTy; 230 IntegerType *IntptrTy; 231 ConstantInt *ZeroShadow; 232 ConstantInt *ShadowPtrMask; 233 ConstantInt *ShadowPtrMul; 234 Constant *ArgTLS; 235 Constant *RetvalTLS; 236 void *(*GetArgTLSPtr)(); 237 void *(*GetRetvalTLSPtr)(); 238 Constant *GetArgTLS; 239 Constant *GetRetvalTLS; 240 Constant *ExternalShadowMask; 241 FunctionType *DFSanUnionFnTy; 242 FunctionType *DFSanUnionLoadFnTy; 243 FunctionType *DFSanUnimplementedFnTy; 244 FunctionType *DFSanSetLabelFnTy; 245 FunctionType *DFSanNonzeroLabelFnTy; 246 FunctionType *DFSanVarargWrapperFnTy; 247 Constant *DFSanUnionFn; 248 Constant *DFSanCheckedUnionFn; 249 Constant *DFSanUnionLoadFn; 250 Constant *DFSanUnimplementedFn; 251 Constant *DFSanSetLabelFn; 252 Constant *DFSanNonzeroLabelFn; 253 Constant *DFSanVarargWrapperFn; 254 MDNode *ColdCallWeights; 255 DFSanABIList ABIList; 256 DenseMap<Value *, Function *> UnwrappedFnMap; 257 AttributeSet ReadOnlyNoneAttrs; 258 bool DFSanRuntimeShadowMask; 259 260 Value *getShadowAddress(Value *Addr, Instruction *Pos); 261 bool isInstrumented(const Function *F); 262 bool isInstrumented(const GlobalAlias *GA); 263 FunctionType *getArgsFunctionType(FunctionType *T); 264 FunctionType *getTrampolineFunctionType(FunctionType *T); 265 FunctionType *getCustomFunctionType(FunctionType *T); 266 InstrumentedABI getInstrumentedABI(); 267 WrapperKind getWrapperKind(Function *F); 268 void addGlobalNamePrefix(GlobalValue *GV); 269 Function *buildWrapperFunction(Function *F, StringRef NewFName, 270 GlobalValue::LinkageTypes NewFLink, 271 FunctionType *NewFT); 272 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName); 273 274 public: 275 DataFlowSanitizer( 276 const std::vector<std::string> &ABIListFiles = std::vector<std::string>(), 277 void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr); 278 static char ID; 279 bool doInitialization(Module &M) override; 280 bool runOnModule(Module &M) override; 281 }; 282 283 struct DFSanFunction { 284 DataFlowSanitizer &DFS; 285 Function *F; 286 DominatorTree DT; 287 DataFlowSanitizer::InstrumentedABI IA; 288 bool IsNativeABI; 289 Value *ArgTLSPtr; 290 Value *RetvalTLSPtr; 291 AllocaInst *LabelReturnAlloca; 292 DenseMap<Value *, Value *> ValShadowMap; 293 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap; 294 std::vector<std::pair<PHINode *, PHINode *> > PHIFixups; 295 DenseSet<Instruction *> SkipInsts; 296 std::vector<Value *> NonZeroChecks; 297 bool AvoidNewBlocks; 298 299 struct CachedCombinedShadow { 300 BasicBlock *Block; 301 Value *Shadow; 302 }; 303 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow> 304 CachedCombinedShadows; 305 DenseMap<Value *, std::set<Value *>> ShadowElements; 306 307 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI) 308 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), 309 IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr), 310 LabelReturnAlloca(nullptr) { 311 DT.recalculate(*F); 312 // FIXME: Need to track down the register allocator issue which causes poor 313 // performance in pathological cases with large numbers of basic blocks. 
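    // When the function already has very many blocks, the instrumentation
    // below avoids splitting blocks for inline fast paths and instead emits
    // out-of-line runtime calls (the checked "dfsan_union" in combineShadows
    // and "__dfsan_union_load" in loadShadow).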
314 AvoidNewBlocks = F->size() > 1000; 315 } 316 Value *getArgTLSPtr(); 317 Value *getArgTLS(unsigned Index, Instruction *Pos); 318 Value *getRetvalTLS(); 319 Value *getShadow(Value *V); 320 void setShadow(Instruction *I, Value *Shadow); 321 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos); 322 Value *combineOperandShadows(Instruction *Inst); 323 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align, 324 Instruction *Pos); 325 void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow, 326 Instruction *Pos); 327 }; 328 329 class DFSanVisitor : public InstVisitor<DFSanVisitor> { 330 public: 331 DFSanFunction &DFSF; 332 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {} 333 334 void visitOperandShadowInst(Instruction &I); 335 336 void visitBinaryOperator(BinaryOperator &BO); 337 void visitCastInst(CastInst &CI); 338 void visitCmpInst(CmpInst &CI); 339 void visitGetElementPtrInst(GetElementPtrInst &GEPI); 340 void visitLoadInst(LoadInst &LI); 341 void visitStoreInst(StoreInst &SI); 342 void visitReturnInst(ReturnInst &RI); 343 void visitCallSite(CallSite CS); 344 void visitPHINode(PHINode &PN); 345 void visitExtractElementInst(ExtractElementInst &I); 346 void visitInsertElementInst(InsertElementInst &I); 347 void visitShuffleVectorInst(ShuffleVectorInst &I); 348 void visitExtractValueInst(ExtractValueInst &I); 349 void visitInsertValueInst(InsertValueInst &I); 350 void visitAllocaInst(AllocaInst &I); 351 void visitSelectInst(SelectInst &I); 352 void visitMemSetInst(MemSetInst &I); 353 void visitMemTransferInst(MemTransferInst &I); 354 }; 355 356 } 357 358 char DataFlowSanitizer::ID; 359 INITIALIZE_PASS(DataFlowSanitizer, "dfsan", 360 "DataFlowSanitizer: dynamic data flow analysis.", false, false) 361 362 ModulePass * 363 llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles, 364 void *(*getArgTLS)(), 365 void *(*getRetValTLS)()) { 366 return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS); 367 } 368 369 DataFlowSanitizer::DataFlowSanitizer( 370 const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(), 371 void *(*getRetValTLS)()) 372 : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS), 373 DFSanRuntimeShadowMask(false) { 374 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles)); 375 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(), 376 ClABIListFiles.end()); 377 ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles)); 378 } 379 380 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) { 381 llvm::SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end()); 382 ArgTypes.append(T->getNumParams(), ShadowTy); 383 if (T->isVarArg()) 384 ArgTypes.push_back(ShadowPtrTy); 385 Type *RetType = T->getReturnType(); 386 if (!RetType->isVoidTy()) 387 RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr); 388 return FunctionType::get(RetType, ArgTypes, T->isVarArg()); 389 } 390 391 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) { 392 assert(!T->isVarArg()); 393 llvm::SmallVector<Type *, 4> ArgTypes; 394 ArgTypes.push_back(T->getPointerTo()); 395 ArgTypes.append(T->param_begin(), T->param_end()); 396 ArgTypes.append(T->getNumParams(), ShadowTy); 397 Type *RetType = T->getReturnType(); 398 if (!RetType->isVoidTy()) 399 ArgTypes.push_back(ShadowPtrTy); 400 return FunctionType::get(T->getReturnType(), ArgTypes, false); 401 } 402 403 FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) { 404 
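  // Build the type of the __dfsw_ custom wrapper: the original parameters
  // (with each function-pointer parameter expanded into a trampoline pointer
  // plus an opaque i8* context pointer), then one shadow per parameter, then
  // a shadow pointer for variadic arguments if any, and finally a shadow
  // pointer used to return the result's label for non-void functions.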
llvm::SmallVector<Type *, 4> ArgTypes; 405 for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end(); 406 i != e; ++i) { 407 FunctionType *FT; 408 if (isa<PointerType>(*i) && (FT = dyn_cast<FunctionType>(cast<PointerType>( 409 *i)->getElementType()))) { 410 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo()); 411 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx)); 412 } else { 413 ArgTypes.push_back(*i); 414 } 415 } 416 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) 417 ArgTypes.push_back(ShadowTy); 418 if (T->isVarArg()) 419 ArgTypes.push_back(ShadowPtrTy); 420 Type *RetType = T->getReturnType(); 421 if (!RetType->isVoidTy()) 422 ArgTypes.push_back(ShadowPtrTy); 423 return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()); 424 } 425 426 bool DataFlowSanitizer::doInitialization(Module &M) { 427 llvm::Triple TargetTriple(M.getTargetTriple()); 428 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64; 429 bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 || 430 TargetTriple.getArch() == llvm::Triple::mips64el; 431 bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64 || 432 TargetTriple.getArch() == llvm::Triple::aarch64_be; 433 434 const DataLayout &DL = M.getDataLayout(); 435 436 Mod = &M; 437 Ctx = &M.getContext(); 438 ShadowTy = IntegerType::get(*Ctx, ShadowWidth); 439 ShadowPtrTy = PointerType::getUnqual(ShadowTy); 440 IntptrTy = DL.getIntPtrType(*Ctx); 441 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0); 442 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8); 443 if (IsX86_64) 444 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL); 445 else if (IsMIPS64) 446 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL); 447 // AArch64 supports multiple VMAs and the shadow mask is set at runtime. 
448 else if (IsAArch64) 449 DFSanRuntimeShadowMask = true; 450 else 451 report_fatal_error("unsupported triple"); 452 453 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy }; 454 DFSanUnionFnTy = 455 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false); 456 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy }; 457 DFSanUnionLoadFnTy = 458 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false); 459 DFSanUnimplementedFnTy = FunctionType::get( 460 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false); 461 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy }; 462 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), 463 DFSanSetLabelArgs, /*isVarArg=*/false); 464 DFSanNonzeroLabelFnTy = FunctionType::get( 465 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false); 466 DFSanVarargWrapperFnTy = FunctionType::get( 467 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false); 468 469 if (GetArgTLSPtr) { 470 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64); 471 ArgTLS = nullptr; 472 GetArgTLS = ConstantExpr::getIntToPtr( 473 ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)), 474 PointerType::getUnqual( 475 FunctionType::get(PointerType::getUnqual(ArgTLSTy), 476 (Type *)nullptr))); 477 } 478 if (GetRetvalTLSPtr) { 479 RetvalTLS = nullptr; 480 GetRetvalTLS = ConstantExpr::getIntToPtr( 481 ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)), 482 PointerType::getUnqual( 483 FunctionType::get(PointerType::getUnqual(ShadowTy), 484 (Type *)nullptr))); 485 } 486 487 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000); 488 return true; 489 } 490 491 bool DataFlowSanitizer::isInstrumented(const Function *F) { 492 return !ABIList.isIn(*F, "uninstrumented"); 493 } 494 495 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) { 496 return !ABIList.isIn(*GA, "uninstrumented"); 497 } 498 499 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() { 500 return ClArgsABI ? IA_Args : IA_TLS; 501 } 502 503 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) { 504 if (ABIList.isIn(*F, "functional")) 505 return WK_Functional; 506 if (ABIList.isIn(*F, "discard")) 507 return WK_Discard; 508 if (ABIList.isIn(*F, "custom")) 509 return WK_Custom; 510 511 return WK_Warning; 512 } 513 514 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) { 515 std::string GVName = GV->getName(), Prefix = "dfs$"; 516 GV->setName(Prefix + GVName); 517 518 // Try to change the name of the function in module inline asm. We only do 519 // this for specific asm directives, currently only ".symver", to try to avoid 520 // corrupting asm which happens to contain the symbol name as a substring. 521 // Note that the substitution for .symver assumes that the versioned symbol 522 // also has an instrumented name. 
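  // For example (hypothetical directive), ".symver foo,foo@@VERS_1" becomes
  // ".symver dfs$foo,dfs$foo@@VERS_1" once foo has been renamed to dfs$foo.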
523 std::string Asm = GV->getParent()->getModuleInlineAsm(); 524 std::string SearchStr = ".symver " + GVName + ","; 525 size_t Pos = Asm.find(SearchStr); 526 if (Pos != std::string::npos) { 527 Asm.replace(Pos, SearchStr.size(), 528 ".symver " + Prefix + GVName + "," + Prefix); 529 GV->getParent()->setModuleInlineAsm(Asm); 530 } 531 } 532 533 Function * 534 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName, 535 GlobalValue::LinkageTypes NewFLink, 536 FunctionType *NewFT) { 537 FunctionType *FT = F->getFunctionType(); 538 Function *NewF = Function::Create(NewFT, NewFLink, NewFName, 539 F->getParent()); 540 NewF->copyAttributesFrom(F); 541 NewF->removeAttributes( 542 AttributeSet::ReturnIndex, 543 AttributeSet::get(F->getContext(), AttributeSet::ReturnIndex, 544 AttributeFuncs::typeIncompatible(NewFT->getReturnType()))); 545 546 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF); 547 if (F->isVarArg()) { 548 NewF->removeAttributes( 549 AttributeSet::FunctionIndex, 550 AttributeSet().addAttribute(*Ctx, AttributeSet::FunctionIndex, 551 "split-stack")); 552 CallInst::Create(DFSanVarargWrapperFn, 553 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "", 554 BB); 555 new UnreachableInst(*Ctx, BB); 556 } else { 557 std::vector<Value *> Args; 558 unsigned n = FT->getNumParams(); 559 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n) 560 Args.push_back(&*ai); 561 CallInst *CI = CallInst::Create(F, Args, "", BB); 562 if (FT->getReturnType()->isVoidTy()) 563 ReturnInst::Create(*Ctx, BB); 564 else 565 ReturnInst::Create(*Ctx, CI, BB); 566 } 567 568 return NewF; 569 } 570 571 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT, 572 StringRef FName) { 573 FunctionType *FTT = getTrampolineFunctionType(FT); 574 Constant *C = Mod->getOrInsertFunction(FName, FTT); 575 Function *F = dyn_cast<Function>(C); 576 if (F && F->isDeclaration()) { 577 F->setLinkage(GlobalValue::LinkOnceODRLinkage); 578 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F); 579 std::vector<Value *> Args; 580 Function::arg_iterator AI = F->arg_begin(); ++AI; 581 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N) 582 Args.push_back(&*AI); 583 CallInst *CI = CallInst::Create(&*F->arg_begin(), Args, "", BB); 584 ReturnInst *RI; 585 if (FT->getReturnType()->isVoidTy()) 586 RI = ReturnInst::Create(*Ctx, BB); 587 else 588 RI = ReturnInst::Create(*Ctx, CI, BB); 589 590 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true); 591 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI; 592 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) 593 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI; 594 DFSanVisitor(DFSF).visitCallInst(*CI); 595 if (!FT->getReturnType()->isVoidTy()) 596 new StoreInst(DFSF.getShadow(RI->getReturnValue()), 597 &*std::prev(F->arg_end()), RI); 598 } 599 600 return C; 601 } 602 603 bool DataFlowSanitizer::runOnModule(Module &M) { 604 if (ABIList.isIn(M, "skip")) 605 return false; 606 607 if (!GetArgTLSPtr) { 608 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64); 609 ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy); 610 if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS)) 611 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel); 612 } 613 if (!GetRetvalTLSPtr) { 614 RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy); 615 if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS)) 616 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel); 617 } 618 619 ExternalShadowMask = 620 
Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy); 621 622 DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy); 623 if (Function *F = dyn_cast<Function>(DFSanUnionFn)) { 624 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind); 625 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone); 626 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 627 F->addAttribute(1, Attribute::ZExt); 628 F->addAttribute(2, Attribute::ZExt); 629 } 630 DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy); 631 if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) { 632 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind); 633 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone); 634 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 635 F->addAttribute(1, Attribute::ZExt); 636 F->addAttribute(2, Attribute::ZExt); 637 } 638 DFSanUnionLoadFn = 639 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy); 640 if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) { 641 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind); 642 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly); 643 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 644 } 645 DFSanUnimplementedFn = 646 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy); 647 DFSanSetLabelFn = 648 Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy); 649 if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) { 650 F->addAttribute(1, Attribute::ZExt); 651 } 652 DFSanNonzeroLabelFn = 653 Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy); 654 DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper", 655 DFSanVarargWrapperFnTy); 656 657 std::vector<Function *> FnsToInstrument; 658 llvm::SmallPtrSet<Function *, 2> FnsWithNativeABI; 659 for (Function &i : M) { 660 if (!i.isIntrinsic() && 661 &i != DFSanUnionFn && 662 &i != DFSanCheckedUnionFn && 663 &i != DFSanUnionLoadFn && 664 &i != DFSanUnimplementedFn && 665 &i != DFSanSetLabelFn && 666 &i != DFSanNonzeroLabelFn && 667 &i != DFSanVarargWrapperFn) 668 FnsToInstrument.push_back(&i); 669 } 670 671 // Give function aliases prefixes when necessary, and build wrappers where the 672 // instrumentedness is inconsistent. 673 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) { 674 GlobalAlias *GA = &*i; 675 ++i; 676 // Don't stop on weak. We assume people aren't playing games with the 677 // instrumentedness of overridden weak aliases. 678 if (auto F = dyn_cast<Function>(GA->getBaseObject())) { 679 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F); 680 if (GAInst && FInst) { 681 addGlobalNamePrefix(GA); 682 } else if (GAInst != FInst) { 683 // Non-instrumented alias of an instrumented function, or vice versa. 684 // Replace the alias with a native-ABI wrapper of the aliasee. The pass 685 // below will take care of instrumenting it. 686 Function *NewF = 687 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType()); 688 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType())); 689 NewF->takeName(GA); 690 GA->eraseFromParent(); 691 FnsToInstrument.push_back(NewF); 692 } 693 } 694 } 695 696 AttrBuilder B; 697 B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone); 698 ReadOnlyNoneAttrs = AttributeSet::get(*Ctx, AttributeSet::FunctionIndex, B); 699 700 // First, change the ABI of every function in the module. 
ABI-listed 701 // functions keep their original ABI and get a wrapper function. 702 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(), 703 e = FnsToInstrument.end(); 704 i != e; ++i) { 705 Function &F = **i; 706 FunctionType *FT = F.getFunctionType(); 707 708 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() && 709 FT->getReturnType()->isVoidTy()); 710 711 if (isInstrumented(&F)) { 712 // Instrumented functions get a 'dfs$' prefix. This allows us to more 713 // easily identify cases of mismatching ABIs. 714 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) { 715 FunctionType *NewFT = getArgsFunctionType(FT); 716 Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M); 717 NewF->copyAttributesFrom(&F); 718 NewF->removeAttributes( 719 AttributeSet::ReturnIndex, 720 AttributeSet::get(NewF->getContext(), AttributeSet::ReturnIndex, 721 AttributeFuncs::typeIncompatible(NewFT->getReturnType()))); 722 for (Function::arg_iterator FArg = F.arg_begin(), 723 NewFArg = NewF->arg_begin(), 724 FArgEnd = F.arg_end(); 725 FArg != FArgEnd; ++FArg, ++NewFArg) { 726 FArg->replaceAllUsesWith(&*NewFArg); 727 } 728 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList()); 729 730 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end(); 731 UI != UE;) { 732 BlockAddress *BA = dyn_cast<BlockAddress>(*UI); 733 ++UI; 734 if (BA) { 735 BA->replaceAllUsesWith( 736 BlockAddress::get(NewF, BA->getBasicBlock())); 737 delete BA; 738 } 739 } 740 F.replaceAllUsesWith( 741 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT))); 742 NewF->takeName(&F); 743 F.eraseFromParent(); 744 *i = NewF; 745 addGlobalNamePrefix(NewF); 746 } else { 747 addGlobalNamePrefix(&F); 748 } 749 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) { 750 // Build a wrapper function for F. The wrapper simply calls F, and is 751 // added to FnsToInstrument so that any instrumentation according to its 752 // WrapperKind is done in the second pass below. 753 FunctionType *NewFT = getInstrumentedABI() == IA_Args 754 ? getArgsFunctionType(FT) 755 : FT; 756 Function *NewF = buildWrapperFunction( 757 &F, std::string("dfsw$") + std::string(F.getName()), 758 GlobalValue::LinkOnceODRLinkage, NewFT); 759 if (getInstrumentedABI() == IA_TLS) 760 NewF->removeAttributes(AttributeSet::FunctionIndex, ReadOnlyNoneAttrs); 761 762 Value *WrappedFnCst = 763 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)); 764 F.replaceAllUsesWith(WrappedFnCst); 765 766 UnwrappedFnMap[WrappedFnCst] = &F; 767 *i = NewF; 768 769 if (!F.isDeclaration()) { 770 // This function is probably defining an interposition of an 771 // uninstrumented function and hence needs to keep the original ABI. 772 // But any functions it may call need to use the instrumented ABI, so 773 // we instrument it in a mode which preserves the original ABI. 774 FnsWithNativeABI.insert(&F); 775 776 // This code needs to rebuild the iterators, as they may be invalidated 777 // by the push_back, taking care that the new range does not include 778 // any functions added by this code. 779 size_t N = i - FnsToInstrument.begin(), 780 Count = e - FnsToInstrument.begin(); 781 FnsToInstrument.push_back(&F); 782 i = FnsToInstrument.begin() + N; 783 e = FnsToInstrument.begin() + Count; 784 } 785 // Hopefully, nobody will try to indirectly call a vararg 786 // function... yet. 
787 } else if (FT->isVarArg()) { 788 UnwrappedFnMap[&F] = &F; 789 *i = nullptr; 790 } 791 } 792 793 for (Function *i : FnsToInstrument) { 794 if (!i || i->isDeclaration()) 795 continue; 796 797 removeUnreachableBlocks(*i); 798 799 DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i)); 800 801 // DFSanVisitor may create new basic blocks, which confuses df_iterator. 802 // Build a copy of the list before iterating over it. 803 llvm::SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock())); 804 805 for (BasicBlock *i : BBList) { 806 Instruction *Inst = &i->front(); 807 while (1) { 808 // DFSanVisitor may split the current basic block, changing the current 809 // instruction's next pointer and moving the next instruction to the 810 // tail block from which we should continue. 811 Instruction *Next = Inst->getNextNode(); 812 // DFSanVisitor may delete Inst, so keep track of whether it was a 813 // terminator. 814 bool IsTerminator = isa<TerminatorInst>(Inst); 815 if (!DFSF.SkipInsts.count(Inst)) 816 DFSanVisitor(DFSF).visit(Inst); 817 if (IsTerminator) 818 break; 819 Inst = Next; 820 } 821 } 822 823 // We will not necessarily be able to compute the shadow for every phi node 824 // until we have visited every block. Therefore, the code that handles phi 825 // nodes adds them to the PHIFixups list so that they can be properly 826 // handled here. 827 for (std::vector<std::pair<PHINode *, PHINode *> >::iterator 828 i = DFSF.PHIFixups.begin(), 829 e = DFSF.PHIFixups.end(); 830 i != e; ++i) { 831 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n; 832 ++val) { 833 i->second->setIncomingValue( 834 val, DFSF.getShadow(i->first->getIncomingValue(val))); 835 } 836 } 837 838 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy 839 // places (i.e. instructions in basic blocks we haven't even begun visiting 840 // yet). To make our life easier, do this work in a pass after the main 841 // instrumentation. 
842 if (ClDebugNonzeroLabels) { 843 for (Value *V : DFSF.NonZeroChecks) { 844 Instruction *Pos; 845 if (Instruction *I = dyn_cast<Instruction>(V)) 846 Pos = I->getNextNode(); 847 else 848 Pos = &DFSF.F->getEntryBlock().front(); 849 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos)) 850 Pos = Pos->getNextNode(); 851 IRBuilder<> IRB(Pos); 852 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow); 853 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen( 854 Ne, Pos, /*Unreachable=*/false, ColdCallWeights)); 855 IRBuilder<> ThenIRB(BI); 856 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {}); 857 } 858 } 859 } 860 861 return false; 862 } 863 864 Value *DFSanFunction::getArgTLSPtr() { 865 if (ArgTLSPtr) 866 return ArgTLSPtr; 867 if (DFS.ArgTLS) 868 return ArgTLSPtr = DFS.ArgTLS; 869 870 IRBuilder<> IRB(&F->getEntryBlock().front()); 871 return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {}); 872 } 873 874 Value *DFSanFunction::getRetvalTLS() { 875 if (RetvalTLSPtr) 876 return RetvalTLSPtr; 877 if (DFS.RetvalTLS) 878 return RetvalTLSPtr = DFS.RetvalTLS; 879 880 IRBuilder<> IRB(&F->getEntryBlock().front()); 881 return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {}); 882 } 883 884 Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) { 885 IRBuilder<> IRB(Pos); 886 return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx); 887 } 888 889 Value *DFSanFunction::getShadow(Value *V) { 890 if (!isa<Argument>(V) && !isa<Instruction>(V)) 891 return DFS.ZeroShadow; 892 Value *&Shadow = ValShadowMap[V]; 893 if (!Shadow) { 894 if (Argument *A = dyn_cast<Argument>(V)) { 895 if (IsNativeABI) 896 return DFS.ZeroShadow; 897 switch (IA) { 898 case DataFlowSanitizer::IA_TLS: { 899 Value *ArgTLSPtr = getArgTLSPtr(); 900 Instruction *ArgTLSPos = 901 DFS.ArgTLS ? &*F->getEntryBlock().begin() 902 : cast<Instruction>(ArgTLSPtr)->getNextNode(); 903 IRBuilder<> IRB(ArgTLSPos); 904 Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos)); 905 break; 906 } 907 case DataFlowSanitizer::IA_Args: { 908 unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2; 909 Function::arg_iterator i = F->arg_begin(); 910 while (ArgIdx--) 911 ++i; 912 Shadow = &*i; 913 assert(Shadow->getType() == DFS.ShadowTy); 914 break; 915 } 916 } 917 NonZeroChecks.push_back(Shadow); 918 } else { 919 Shadow = DFS.ZeroShadow; 920 } 921 } 922 return Shadow; 923 } 924 925 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) { 926 assert(!ValShadowMap.count(I)); 927 assert(Shadow->getType() == DFS.ShadowTy); 928 ValShadowMap[I] = Shadow; 929 } 930 931 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) { 932 assert(Addr != RetvalTLS && "Reinstrumenting?"); 933 IRBuilder<> IRB(Pos); 934 Value *ShadowPtrMaskValue; 935 if (DFSanRuntimeShadowMask) 936 ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask); 937 else 938 ShadowPtrMaskValue = ShadowPtrMask; 939 return IRB.CreateIntToPtr( 940 IRB.CreateMul( 941 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy), 942 IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)), 943 ShadowPtrMul), 944 ShadowPtrTy); 945 } 946 947 // Generates IR to compute the union of the two given shadows, inserting it 948 // before Pos. Returns the computed union Value. 
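// The result is cached per (V1, V2) pair along with the block computing it
// and reused when that block dominates Pos. ShadowElements records which
// primitive shadows feed each union so that a union already subsuming the
// other operand can be returned directly.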
949 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) { 950 if (V1 == DFS.ZeroShadow) 951 return V2; 952 if (V2 == DFS.ZeroShadow) 953 return V1; 954 if (V1 == V2) 955 return V1; 956 957 auto V1Elems = ShadowElements.find(V1); 958 auto V2Elems = ShadowElements.find(V2); 959 if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) { 960 if (std::includes(V1Elems->second.begin(), V1Elems->second.end(), 961 V2Elems->second.begin(), V2Elems->second.end())) { 962 return V1; 963 } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(), 964 V1Elems->second.begin(), V1Elems->second.end())) { 965 return V2; 966 } 967 } else if (V1Elems != ShadowElements.end()) { 968 if (V1Elems->second.count(V2)) 969 return V1; 970 } else if (V2Elems != ShadowElements.end()) { 971 if (V2Elems->second.count(V1)) 972 return V2; 973 } 974 975 auto Key = std::make_pair(V1, V2); 976 if (V1 > V2) 977 std::swap(Key.first, Key.second); 978 CachedCombinedShadow &CCS = CachedCombinedShadows[Key]; 979 if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent())) 980 return CCS.Shadow; 981 982 IRBuilder<> IRB(Pos); 983 if (AvoidNewBlocks) { 984 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2}); 985 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 986 Call->addAttribute(1, Attribute::ZExt); 987 Call->addAttribute(2, Attribute::ZExt); 988 989 CCS.Block = Pos->getParent(); 990 CCS.Shadow = Call; 991 } else { 992 BasicBlock *Head = Pos->getParent(); 993 Value *Ne = IRB.CreateICmpNE(V1, V2); 994 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen( 995 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT)); 996 IRBuilder<> ThenIRB(BI); 997 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2}); 998 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 999 Call->addAttribute(1, Attribute::ZExt); 1000 Call->addAttribute(2, Attribute::ZExt); 1001 1002 BasicBlock *Tail = BI->getSuccessor(0); 1003 PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front()); 1004 Phi->addIncoming(Call, Call->getParent()); 1005 Phi->addIncoming(V1, Head); 1006 1007 CCS.Block = Tail; 1008 CCS.Shadow = Phi; 1009 } 1010 1011 std::set<Value *> UnionElems; 1012 if (V1Elems != ShadowElements.end()) { 1013 UnionElems = V1Elems->second; 1014 } else { 1015 UnionElems.insert(V1); 1016 } 1017 if (V2Elems != ShadowElements.end()) { 1018 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end()); 1019 } else { 1020 UnionElems.insert(V2); 1021 } 1022 ShadowElements[CCS.Shadow] = std::move(UnionElems); 1023 1024 return CCS.Shadow; 1025 } 1026 1027 // A convenience function which folds the shadows of each of the operands 1028 // of the provided instruction Inst, inserting the IR before Inst. Returns 1029 // the computed union Value. 1030 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) { 1031 if (Inst->getNumOperands() == 0) 1032 return DFS.ZeroShadow; 1033 1034 Value *Shadow = getShadow(Inst->getOperand(0)); 1035 for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) { 1036 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst); 1037 } 1038 return Shadow; 1039 } 1040 1041 void DFSanVisitor::visitOperandShadowInst(Instruction &I) { 1042 Value *CombinedShadow = DFSF.combineOperandShadows(&I); 1043 DFSF.setShadow(&I, CombinedShadow); 1044 } 1045 1046 // Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where 1047 // Addr has alignment Align, and take the union of each of those shadows. 
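// Several strategies are used, roughly in order of preference: a direct load
// from the tracked shadow alloca, a constant zero shadow when every
// underlying object is constant, inline loads for sizes 1 and 2, a
// 64-bits-at-a-time fast path (when new blocks are allowed) that falls back
// to __dfsan_union_load on mismatching shadows, and otherwise a plain call
// to __dfsan_union_load.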
1048 Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align, 1049 Instruction *Pos) { 1050 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { 1051 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i = 1052 AllocaShadowMap.find(AI); 1053 if (i != AllocaShadowMap.end()) { 1054 IRBuilder<> IRB(Pos); 1055 return IRB.CreateLoad(i->second); 1056 } 1057 } 1058 1059 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8; 1060 SmallVector<Value *, 2> Objs; 1061 GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout()); 1062 bool AllConstants = true; 1063 for (Value *Obj : Objs) { 1064 if (isa<Function>(Obj) || isa<BlockAddress>(Obj)) 1065 continue; 1066 if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant()) 1067 continue; 1068 1069 AllConstants = false; 1070 break; 1071 } 1072 if (AllConstants) 1073 return DFS.ZeroShadow; 1074 1075 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); 1076 switch (Size) { 1077 case 0: 1078 return DFS.ZeroShadow; 1079 case 1: { 1080 LoadInst *LI = new LoadInst(ShadowAddr, "", Pos); 1081 LI->setAlignment(ShadowAlign); 1082 return LI; 1083 } 1084 case 2: { 1085 IRBuilder<> IRB(Pos); 1086 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr, 1087 ConstantInt::get(DFS.IntptrTy, 1)); 1088 return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign), 1089 IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos); 1090 } 1091 } 1092 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) { 1093 // Fast path for the common case where each byte has identical shadow: load 1094 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any 1095 // shadow is non-equal. 1096 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F); 1097 IRBuilder<> FallbackIRB(FallbackBB); 1098 CallInst *FallbackCall = FallbackIRB.CreateCall( 1099 DFS.DFSanUnionLoadFn, 1100 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)}); 1101 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 1102 1103 // Compare each of the shadows stored in the loaded 64 bits to each other, 1104 // by computing (WideShadow rotl ShadowWidth) == WideShadow. 1105 IRBuilder<> IRB(Pos); 1106 Value *WideAddr = 1107 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx)); 1108 Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign); 1109 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy); 1110 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth); 1111 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth); 1112 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow); 1113 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow); 1114 1115 BasicBlock *Head = Pos->getParent(); 1116 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator()); 1117 1118 if (DomTreeNode *OldNode = DT.getNode(Head)) { 1119 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end()); 1120 1121 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head); 1122 for (auto Child : Children) 1123 DT.changeImmediateDominator(Child, NewNode); 1124 } 1125 1126 // In the following code LastBr will refer to the previous basic block's 1127 // conditional branch instruction, whose true successor is fixed up to point 1128 // to the next block during the loop below or to the tail after the final 1129 // iteration. 
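    // Both successors start out as FallbackBB: the false successor is final
    // (mismatching shadows branch to the fallback), while the true successor
    // is a placeholder that setSuccessor(0, ...) below redirects to the next
    // comparison block or, after the final iteration, to Tail.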
1130 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq); 1131 ReplaceInstWithInst(Head->getTerminator(), LastBr); 1132 DT.addNewBlock(FallbackBB, Head); 1133 1134 for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size; 1135 Ofs += 64 / DFS.ShadowWidth) { 1136 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F); 1137 DT.addNewBlock(NextBB, LastBr->getParent()); 1138 IRBuilder<> NextIRB(NextBB); 1139 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr, 1140 ConstantInt::get(DFS.IntptrTy, 1)); 1141 Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign); 1142 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow); 1143 LastBr->setSuccessor(0, NextBB); 1144 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB); 1145 } 1146 1147 LastBr->setSuccessor(0, Tail); 1148 FallbackIRB.CreateBr(Tail); 1149 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front()); 1150 Shadow->addIncoming(FallbackCall, FallbackBB); 1151 Shadow->addIncoming(TruncShadow, LastBr->getParent()); 1152 return Shadow; 1153 } 1154 1155 IRBuilder<> IRB(Pos); 1156 CallInst *FallbackCall = IRB.CreateCall( 1157 DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)}); 1158 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt); 1159 return FallbackCall; 1160 } 1161 1162 void DFSanVisitor::visitLoadInst(LoadInst &LI) { 1163 auto &DL = LI.getModule()->getDataLayout(); 1164 uint64_t Size = DL.getTypeStoreSize(LI.getType()); 1165 if (Size == 0) { 1166 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow); 1167 return; 1168 } 1169 1170 uint64_t Align; 1171 if (ClPreserveAlignment) { 1172 Align = LI.getAlignment(); 1173 if (Align == 0) 1174 Align = DL.getABITypeAlignment(LI.getType()); 1175 } else { 1176 Align = 1; 1177 } 1178 IRBuilder<> IRB(&LI); 1179 Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI); 1180 if (ClCombinePointerLabelsOnLoad) { 1181 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand()); 1182 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI); 1183 } 1184 if (Shadow != DFSF.DFS.ZeroShadow) 1185 DFSF.NonZeroChecks.push_back(Shadow); 1186 1187 DFSF.setShadow(&LI, Shadow); 1188 } 1189 1190 void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align, 1191 Value *Shadow, Instruction *Pos) { 1192 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { 1193 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i = 1194 AllocaShadowMap.find(AI); 1195 if (i != AllocaShadowMap.end()) { 1196 IRBuilder<> IRB(Pos); 1197 IRB.CreateStore(Shadow, i->second); 1198 return; 1199 } 1200 } 1201 1202 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8; 1203 IRBuilder<> IRB(Pos); 1204 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); 1205 if (Shadow == DFS.ZeroShadow) { 1206 IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth); 1207 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0); 1208 Value *ExtShadowAddr = 1209 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy)); 1210 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign); 1211 return; 1212 } 1213 1214 const unsigned ShadowVecSize = 128 / DFS.ShadowWidth; 1215 uint64_t Offset = 0; 1216 if (Size >= ShadowVecSize) { 1217 VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize); 1218 Value *ShadowVec = UndefValue::get(ShadowVecTy); 1219 for (unsigned i = 0; i != ShadowVecSize; ++i) { 1220 ShadowVec = IRB.CreateInsertElement( 1221 ShadowVec, Shadow, 
ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i)); 1222 } 1223 Value *ShadowVecAddr = 1224 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy)); 1225 do { 1226 Value *CurShadowVecAddr = 1227 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset); 1228 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign); 1229 Size -= ShadowVecSize; 1230 ++Offset; 1231 } while (Size >= ShadowVecSize); 1232 Offset *= ShadowVecSize; 1233 } 1234 while (Size > 0) { 1235 Value *CurShadowAddr = 1236 IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset); 1237 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign); 1238 --Size; 1239 ++Offset; 1240 } 1241 } 1242 1243 void DFSanVisitor::visitStoreInst(StoreInst &SI) { 1244 auto &DL = SI.getModule()->getDataLayout(); 1245 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType()); 1246 if (Size == 0) 1247 return; 1248 1249 uint64_t Align; 1250 if (ClPreserveAlignment) { 1251 Align = SI.getAlignment(); 1252 if (Align == 0) 1253 Align = DL.getABITypeAlignment(SI.getValueOperand()->getType()); 1254 } else { 1255 Align = 1; 1256 } 1257 1258 Value* Shadow = DFSF.getShadow(SI.getValueOperand()); 1259 if (ClCombinePointerLabelsOnStore) { 1260 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand()); 1261 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI); 1262 } 1263 DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI); 1264 } 1265 1266 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) { 1267 visitOperandShadowInst(BO); 1268 } 1269 1270 void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); } 1271 1272 void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); } 1273 1274 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) { 1275 visitOperandShadowInst(GEPI); 1276 } 1277 1278 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) { 1279 visitOperandShadowInst(I); 1280 } 1281 1282 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) { 1283 visitOperandShadowInst(I); 1284 } 1285 1286 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) { 1287 visitOperandShadowInst(I); 1288 } 1289 1290 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) { 1291 visitOperandShadowInst(I); 1292 } 1293 1294 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) { 1295 visitOperandShadowInst(I); 1296 } 1297 1298 void DFSanVisitor::visitAllocaInst(AllocaInst &I) { 1299 bool AllLoadsStores = true; 1300 for (User *U : I.users()) { 1301 if (isa<LoadInst>(U)) 1302 continue; 1303 1304 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 1305 if (SI->getPointerOperand() == &I) 1306 continue; 1307 } 1308 1309 AllLoadsStores = false; 1310 break; 1311 } 1312 if (AllLoadsStores) { 1313 IRBuilder<> IRB(&I); 1314 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy); 1315 } 1316 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow); 1317 } 1318 1319 void DFSanVisitor::visitSelectInst(SelectInst &I) { 1320 Value *CondShadow = DFSF.getShadow(I.getCondition()); 1321 Value *TrueShadow = DFSF.getShadow(I.getTrueValue()); 1322 Value *FalseShadow = DFSF.getShadow(I.getFalseValue()); 1323 1324 if (isa<VectorType>(I.getCondition()->getType())) { 1325 DFSF.setShadow( 1326 &I, 1327 DFSF.combineShadows( 1328 CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I)); 1329 } else { 1330 Value *ShadowSel; 1331 if (TrueShadow == FalseShadow) { 1332 ShadowSel = TrueShadow; 1333 } else { 1334 ShadowSel = 1335 SelectInst::Create(I.getCondition(), TrueShadow, 
FalseShadow, "", &I); 1336 } 1337 DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I)); 1338 } 1339 } 1340 1341 void DFSanVisitor::visitMemSetInst(MemSetInst &I) { 1342 IRBuilder<> IRB(&I); 1343 Value *ValShadow = DFSF.getShadow(I.getValue()); 1344 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn, 1345 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy( 1346 *DFSF.DFS.Ctx)), 1347 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)}); 1348 } 1349 1350 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) { 1351 IRBuilder<> IRB(&I); 1352 Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I); 1353 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I); 1354 Value *LenShadow = IRB.CreateMul( 1355 I.getLength(), 1356 ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8)); 1357 Value *AlignShadow; 1358 if (ClPreserveAlignment) { 1359 AlignShadow = IRB.CreateMul(I.getAlignmentCst(), 1360 ConstantInt::get(I.getAlignmentCst()->getType(), 1361 DFSF.DFS.ShadowWidth / 8)); 1362 } else { 1363 AlignShadow = ConstantInt::get(I.getAlignmentCst()->getType(), 1364 DFSF.DFS.ShadowWidth / 8); 1365 } 1366 Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx); 1367 DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr); 1368 SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr); 1369 IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow, 1370 AlignShadow, I.getVolatileCst()}); 1371 } 1372 1373 void DFSanVisitor::visitReturnInst(ReturnInst &RI) { 1374 if (!DFSF.IsNativeABI && RI.getReturnValue()) { 1375 switch (DFSF.IA) { 1376 case DataFlowSanitizer::IA_TLS: { 1377 Value *S = DFSF.getShadow(RI.getReturnValue()); 1378 IRBuilder<> IRB(&RI); 1379 IRB.CreateStore(S, DFSF.getRetvalTLS()); 1380 break; 1381 } 1382 case DataFlowSanitizer::IA_Args: { 1383 IRBuilder<> IRB(&RI); 1384 Type *RT = DFSF.F->getFunctionType()->getReturnType(); 1385 Value *InsVal = 1386 IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0); 1387 Value *InsShadow = 1388 IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1); 1389 RI.setOperand(0, InsShadow); 1390 break; 1391 } 1392 } 1393 } 1394 } 1395 1396 void DFSanVisitor::visitCallSite(CallSite CS) { 1397 Function *F = CS.getCalledFunction(); 1398 if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) { 1399 visitOperandShadowInst(*CS.getInstruction()); 1400 return; 1401 } 1402 1403 // Calls to this function are synthesized in wrappers, and we shouldn't 1404 // instrument them. 1405 if (F == DFSF.DFS.DFSanVarargWrapperFn) 1406 return; 1407 1408 IRBuilder<> IRB(CS.getInstruction()); 1409 1410 DenseMap<Value *, Function *>::iterator i = 1411 DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue()); 1412 if (i != DFSF.DFS.UnwrappedFnMap.end()) { 1413 Function *F = i->second; 1414 switch (DFSF.DFS.getWrapperKind(F)) { 1415 case DataFlowSanitizer::WK_Warning: { 1416 CS.setCalledFunction(F); 1417 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn, 1418 IRB.CreateGlobalStringPtr(F->getName())); 1419 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow); 1420 return; 1421 } 1422 case DataFlowSanitizer::WK_Discard: { 1423 CS.setCalledFunction(F); 1424 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow); 1425 return; 1426 } 1427 case DataFlowSanitizer::WK_Functional: { 1428 CS.setCalledFunction(F); 1429 visitOperandShadowInst(*CS.getInstruction()); 1430 return; 1431 } 1432 case DataFlowSanitizer::WK_Custom: { 1433 // Don't try to handle invokes of custom functions, it's too complicated. 
1434 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_ 1435 // wrapper. 1436 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) { 1437 FunctionType *FT = F->getFunctionType(); 1438 FunctionType *CustomFT = DFSF.DFS.getCustomFunctionType(FT); 1439 std::string CustomFName = "__dfsw_"; 1440 CustomFName += F->getName(); 1441 Constant *CustomF = 1442 DFSF.DFS.Mod->getOrInsertFunction(CustomFName, CustomFT); 1443 if (Function *CustomFn = dyn_cast<Function>(CustomF)) { 1444 CustomFn->copyAttributesFrom(F); 1445 1446 // Custom functions returning non-void will write to the return label. 1447 if (!FT->getReturnType()->isVoidTy()) { 1448 CustomFn->removeAttributes(AttributeSet::FunctionIndex, 1449 DFSF.DFS.ReadOnlyNoneAttrs); 1450 } 1451 } 1452 1453 std::vector<Value *> Args; 1454 1455 CallSite::arg_iterator i = CS.arg_begin(); 1456 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) { 1457 Type *T = (*i)->getType(); 1458 FunctionType *ParamFT; 1459 if (isa<PointerType>(T) && 1460 (ParamFT = dyn_cast<FunctionType>( 1461 cast<PointerType>(T)->getElementType()))) { 1462 std::string TName = "dfst"; 1463 TName += utostr(FT->getNumParams() - n); 1464 TName += "$"; 1465 TName += F->getName(); 1466 Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName); 1467 Args.push_back(T); 1468 Args.push_back( 1469 IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx))); 1470 } else { 1471 Args.push_back(*i); 1472 } 1473 } 1474 1475 i = CS.arg_begin(); 1476 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) 1477 Args.push_back(DFSF.getShadow(*i)); 1478 1479 if (FT->isVarArg()) { 1480 auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy, 1481 CS.arg_size() - FT->getNumParams()); 1482 auto *LabelVAAlloca = new AllocaInst( 1483 LabelVATy, "labelva", &DFSF.F->getEntryBlock().front()); 1484 1485 for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) { 1486 auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n); 1487 IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr); 1488 } 1489 1490 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0)); 1491 } 1492 1493 if (!FT->getReturnType()->isVoidTy()) { 1494 if (!DFSF.LabelReturnAlloca) { 1495 DFSF.LabelReturnAlloca = 1496 new AllocaInst(DFSF.DFS.ShadowTy, "labelreturn", 1497 &DFSF.F->getEntryBlock().front()); 1498 } 1499 Args.push_back(DFSF.LabelReturnAlloca); 1500 } 1501 1502 for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i) 1503 Args.push_back(*i); 1504 1505 CallInst *CustomCI = IRB.CreateCall(CustomF, Args); 1506 CustomCI->setCallingConv(CI->getCallingConv()); 1507 CustomCI->setAttributes(CI->getAttributes()); 1508 1509 if (!FT->getReturnType()->isVoidTy()) { 1510 LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca); 1511 DFSF.setShadow(CustomCI, LabelLoad); 1512 } 1513 1514 CI->replaceAllUsesWith(CustomCI); 1515 CI->eraseFromParent(); 1516 return; 1517 } 1518 break; 1519 } 1520 } 1521 } 1522 1523 FunctionType *FT = cast<FunctionType>( 1524 CS.getCalledValue()->getType()->getPointerElementType()); 1525 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) { 1526 for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) { 1527 IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)), 1528 DFSF.getArgTLS(i, CS.getInstruction())); 1529 } 1530 } 1531 1532 Instruction *Next = nullptr; 1533 if (!CS.getType()->isVoidTy()) { 1534 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) { 1535 if (II->getNormalDest()->getSinglePredecessor()) { 1536 Next = 
&II->getNormalDest()->front(); 1537 } else { 1538 BasicBlock *NewBB = 1539 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT); 1540 Next = &NewBB->front(); 1541 } 1542 } else { 1543 assert(CS->getIterator() != CS->getParent()->end()); 1544 Next = CS->getNextNode(); 1545 } 1546 1547 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) { 1548 IRBuilder<> NextIRB(Next); 1549 LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS()); 1550 DFSF.SkipInsts.insert(LI); 1551 DFSF.setShadow(CS.getInstruction(), LI); 1552 DFSF.NonZeroChecks.push_back(LI); 1553 } 1554 } 1555 1556 // Do all instrumentation for IA_Args down here to defer tampering with the 1557 // CFG in a way that SplitEdge may be able to detect. 1558 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) { 1559 FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT); 1560 Value *Func = 1561 IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT)); 1562 std::vector<Value *> Args; 1563 1564 CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 1565 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) 1566 Args.push_back(*i); 1567 1568 i = CS.arg_begin(); 1569 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) 1570 Args.push_back(DFSF.getShadow(*i)); 1571 1572 if (FT->isVarArg()) { 1573 unsigned VarArgSize = CS.arg_size() - FT->getNumParams(); 1574 ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize); 1575 AllocaInst *VarArgShadow = 1576 new AllocaInst(VarArgArrayTy, "", &DFSF.F->getEntryBlock().front()); 1577 Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0)); 1578 for (unsigned n = 0; i != e; ++i, ++n) { 1579 IRB.CreateStore( 1580 DFSF.getShadow(*i), 1581 IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n)); 1582 Args.push_back(*i); 1583 } 1584 } 1585 1586 CallSite NewCS; 1587 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) { 1588 NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(), 1589 Args); 1590 } else { 1591 NewCS = IRB.CreateCall(Func, Args); 1592 } 1593 NewCS.setCallingConv(CS.getCallingConv()); 1594 NewCS.setAttributes(CS.getAttributes().removeAttributes( 1595 *DFSF.DFS.Ctx, AttributeSet::ReturnIndex, 1596 AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType()))); 1597 1598 if (Next) { 1599 ExtractValueInst *ExVal = 1600 ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next); 1601 DFSF.SkipInsts.insert(ExVal); 1602 ExtractValueInst *ExShadow = 1603 ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next); 1604 DFSF.SkipInsts.insert(ExShadow); 1605 DFSF.setShadow(ExVal, ExShadow); 1606 DFSF.NonZeroChecks.push_back(ExShadow); 1607 1608 CS.getInstruction()->replaceAllUsesWith(ExVal); 1609 } 1610 1611 CS.getInstruction()->eraseFromParent(); 1612 } 1613 } 1614 1615 void DFSanVisitor::visitPHINode(PHINode &PN) { 1616 PHINode *ShadowPN = 1617 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN); 1618 1619 // Give the shadow phi node valid predecessors to fool SplitEdge into working. 1620 Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy); 1621 for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e; 1622 ++i) { 1623 ShadowPN->addIncoming(UndefShadow, *i); 1624 } 1625 1626 DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN)); 1627 DFSF.setShadow(&PN, ShadowPN); 1628 } 1629