//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//     - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
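
// For illustration only, a rough sketch of the resulting IR (not a literal
// dump produced by this pass): a function containing a plain 4-byte store
// ends up looking roughly like
//
//   %ra = call i8* @llvm.returnaddress(i32 0)
//   call void @__tsan_func_entry(i8* %ra)
//   ...
//   %a = bitcast i32* %p to i8*
//   call void @__tsan_write4(i8* %a)
//   store i32 %v, i32* %p, align 4
//   ...
//   call void @__tsan_func_exit()
//   ret void
//
// The run-time library does the actual race detection behind these callbacks.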

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
  const char *getPassName() const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I);
  bool instrumentAtomic(Instruction *I);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                      SmallVectorImpl<Instruction*> &All);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr);

  const DataLayout *DL;
  Type *IntptrTy;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanUnalignedRead[kNumberOfAccessSizes];
  Function *TsanUnalignedWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
                "ThreadSanitizer: detects data races.",
                false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}
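
// The run-time callbacks declared below follow a simple naming scheme that
// encodes the access size: __tsan_read1/2/4/8/16, __tsan_write<N>,
// __tsan_unaligned_read<N>/__tsan_unaligned_write<N>, and
// __tsan_atomic<Bits>_load/_store/_fetch_add/... for atomics.  The arrays
// sized by kNumberOfAccessSizes are indexed by Idx = log2(access size in
// bytes), so, for example, a 4-byte access maps to index 2 and to
// __tsan_read4 or __tsan_write4 (see getMemoryAccessFuncIndex below).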

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), nullptr));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" +
                                      itostr(ByteSize));
    TsanUnalignedRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" +
                                       itostr(ByteSize));
    TsanUnalignedWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
        nullptr));

    for (int op = AtomicRMWInst::FIRST_BINOP;
         op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), nullptr));
  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr));

  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, nullptr));
  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, nullptr));
  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, nullptr));
}

bool ThreadSanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), nullptr);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
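// For example (a sketch, not taken from a real test case), in
//
//   %tmp = load i32* %x
//   %inc = add i32 %tmp, 1
//   store i32 %inc, i32* %x
//
// only the store is instrumented: any racing access to %x is already caught
// by the instrumented write, so the preceding read in the same basic block
// (with no calls in between) is omitted and counted in
// NumOmittedReadsBeforeWrite.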
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}
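
// Per-function driver (a rough summary of the logic below; see the comments
// inside for the precise rules): collect the interesting instructions first,
// then rewrite them.  Plain loads/stores are gathered per basic block and
// filtered through chooseInstructionsToInstrument; atomics, memory intrinsics
// and returns are collected as-is.  Entry/exit callbacks are added only if
// something was instrumented or the function makes calls.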
bool ThreadSanitizer::runOnFunction(Function &F) {
  if (!DL) return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<ReturnInst>(Inst))
        RetVec.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (auto RetInst : RetVec) {
      IRBuilder<> IRBRet(RetInst);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite
      ? cast<StoreInst>(I)->getAlignment()
      : cast<LoadInst>(I)->getAlignment();
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
  Value *OnAccessFunc = nullptr;
  // Use the aligned callback if the alignment is unspecified (0, i.e. ABI
  // alignment), at least 8 bytes, or a multiple of the access size; otherwise
  // fall back to the unaligned callback.
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
    OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  else
    OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}
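
// Map an LLVM atomic ordering onto the numeric memory_order constants used by
// the __tsan_atomic* callbacks.  The values mirror the C/C++11 enumeration:
// relaxed = 0, consume = 1 (not produced yet), acquire = 2, release = 3,
// acq_rel = 4, seq_cst = 5.  Unordered and monotonic accesses are both mapped
// to relaxed.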
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic:              llvm_unreachable("unexpected atomic ordering!");
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:             v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall3(MemsetFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards.  For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
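//
// Atomic instructions are replaced outright with calls into the run-time.
// As a rough sketch (not a literal dump), an acquire load such as
//
//   %v = load atomic i32* %p acquire, align 4
//
// becomes
//
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)
//
// and a cmpxchg becomes a call to __tsan_atomic<N>_compare_exchange_val that
// returns the old value; the success bit of the { value, i1 } result is then
// reconstructed with an icmp against the expected value.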

bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());

    Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}