//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//     - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
  const char *getPassName() const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I);
  bool instrumentAtomic(Instruction *I);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                      SmallVectorImpl<Instruction*> &All);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr);

  const DataLayout *DL;
  Type *IntptrTy;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanUnalignedRead[kNumberOfAccessSizes];
  Function *TsanUnalignedWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), nullptr));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" +
                                      itostr(ByteSize));
    TsanUnalignedRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" +
                                       itostr(ByteSize));
    TsanUnalignedWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
        nullptr));

    for (int op = AtomicRMWInst::FIRST_BINOP;
         op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), nullptr));
  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr));

  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, nullptr));
  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, nullptr));
  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, nullptr));
}

bool ThreadSanitizer::doInitialization(Module &M) {
  DL = &M.getDataLayout();

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), nullptr);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any
        // writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  if (!DL) return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<ReturnInst>(Inst))
        RetVec.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (auto RetInst : RetVec) {
      IRBuilder<> IRBRet(RetInst);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at
    // once. In this case, just take the first element of the vector since
    // this is enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite
      ? cast<StoreInst>(I)->getAlignment()
      : cast<LoadInst>(I)->getAlignment();
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
  Value *OnAccessFunc = nullptr;
  // Accesses with no explicit alignment (0 means ABI alignment), an alignment
  // of at least 8 bytes, or an alignment that is a multiple of the access size
  // use the regular callbacks; everything else uses the unaligned callbacks.
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
    OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  else
    OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else NumInstrumentedReads++;
  return true;
}

// Translate the IR atomic ordering into the integer constant that the
// __tsan_atomic* run-time callbacks take as their memory-order argument.
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:             v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on
// it. So, we either need to ensure the intrinsic is not inlined, or instrument
// it. We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall3(MemsetFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());

    Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  // Idx is the log2 of the access size in bytes; it indexes the callback
  // arrays (TsanRead, TsanWrite, etc.).
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}