//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//     - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "tsan"

#include "BlackList.h"
#include "llvm/Function.h"
#include "llvm/IRBuilder.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/DataLayout.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

static cl::opt<std::string> ClBlackListFile("tsan-blacklist",
       cl::desc("Blacklist file"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");

namespace {

/// ThreadSanitizer: instrument the code in the module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer();
  const char *getPassName() const;
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  bool instrumentLoadOrStore(Instruction *I);
  bool instrumentAtomic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                      SmallVectorImpl<Instruction*> &All);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr);

  DataLayout *TD;
  OwningPtr<BlackList> BL;
  IntegerType *OrdTy;
  // Callbacks to the run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

ThreadSanitizer::ThreadSanitizer()
  : FunctionPass(ID),
  TD(NULL) {
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}

bool ThreadSanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(new BlackList(ClBlackListFile));

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), NULL);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  // Initialize the callbacks.
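  // A sketch of the runtime interface declared below, derived from the names
  // this function constructs (the IR example is illustrative only):
  //  - Plain accesses: __tsan_read<ByteSize> / __tsan_write<ByteSize>, which
  //    take just an i8* address.
  //  - Atomics: __tsan_atomic<BitSize>_{load,store,fetch_*,exchange,
  //    compare_exchange_val}, which operate on the matching i<BitSize> type
  //    and take additional memory-order argument(s).
  // For example, instrumentLoadOrStore() turns a plain 4-byte load
  //     %v = load i32* %p
  // into the same load preceded by
  //     call void @__tsan_read4(i8* <%p cast to i8*>)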
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), NULL));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, NULL));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
        NULL));

    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = NULL;
      const char *NamePart = NULL;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), NULL));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa)) {
    if (Tag->getNumOperands() < 1) return false;
    if (MDString *Tag1 = dyn_cast<MDString>(Tag->getOperand(0))) {
      if (Tag1->getString() == "vtable pointer") return true;
    }
  }
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some patterns that should not survive
// the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race
        // with any writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  if (!TD) return false;
  if (BL->isIn(F)) return false;
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  bool Res = false;
  bool HasCalls = false;

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (Function::iterator FI = F.begin(), FE = F.end();
       FI != FE; ++FI) {
    BasicBlock &BB = *FI;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
         BI != BE; ++BI) {
      if (isAtomic(BI))
        AtomicAccesses.push_back(BI);
      else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
        LocalLoadsAndStores.push_back(BI);
      else if (isa<ReturnInst>(BI))
        RetVec.push_back(BI);
      else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses.
  if (ClInstrumentMemoryAccesses)
    for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
      Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
    }

  // Instrument atomic memory accesses.
  if (ClInstrumentAtomics)
    for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
      Res |= instrumentAtomic(AtomicAccesses[i]);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
      IRBuilder<> IRBRet(RetVec[i]);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue does not necessarily have a pointer type.
    if (isa<IntegerType>(StoredValue->getType()))
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}

static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic: assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
 // case Consume:                v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic: assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
 // case Consume:                v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 0; break;
    case AcquireRelease:         v = 2; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// Both llvm and ThreadSanitizer atomic operations are based on the C++11/C1x
// standards.  For background see the C++11 standard.  A slightly older,
// publicly available draft of the standard (not entirely up-to-date, but close
// enough for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (F == NULL)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getOrdering()),
                     createFailOrdering(&IRB, CASI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = CountTrailingZeros_32(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}