//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//      - Optimizations may be applied to avoid instrumenting some of the
//        accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library, as illustrated below.
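//
// As a rough sketch (illustrative only; the exact IR depends on the options
// and optimizations below), a naturally aligned 4-byte load such as
//   %v = load i32, i32* %p
// in a function attributed with sanitize_thread becomes
//   call void @__tsan_read4(i8* %p.i8)   ; %p.i8 is %p cast to i8*
//   %v = load i32, i32* %p
// and the enclosing function receives a __tsan_func_entry call on entry and
// a __tsan_func_exit call on every exit.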
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool>  ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool>  ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool>  ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool>  ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool>  ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
static cl::opt<bool>  ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::init(false),
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

static const char *const kTsanModuleCtorName = "tsan.module_ctor";
static const char *const kTsanInitName = "__tsan_init";

namespace {

/// ThreadSanitizer: instrument the code in the module to find races.
///
/// Sanitizing a function with ThreadSanitizer inserts the tsan runtime
/// library API function declarations into the module if they don't exist
/// already. The module constructor that calls __tsan_init is added
/// separately (see insertModuleCtor below).
struct ThreadSanitizer {
  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

private:
  void initialize(Module &M);
  bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<Instruction *> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  FunctionCallee TsanFuncEntry;
  FunctionCallee TsanFuncExit;
  FunctionCallee TsanIgnoreBegin;
  FunctionCallee TsanIgnoreEnd;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
                              [kNumberOfAccessSizes];
  FunctionCallee TsanAtomicCAS[kNumberOfAccessSizes];
  FunctionCallee TsanAtomicThreadFence;
  FunctionCallee TsanAtomicSignalFence;
  FunctionCallee TsanVptrUpdate;
  FunctionCallee TsanVptrLoad;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
};

struct ThreadSanitizerLegacyPass : FunctionPass {
  ThreadSanitizerLegacyPass() : FunctionPass(ID) {}
  StringRef getPassName() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID; // Pass identification, replacement for typeid.
private:
  Optional<ThreadSanitizer> TSan;
};

void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case:
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}

}  // namespace

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

PreservedAnalyses ThreadSanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &MAM) {
  insertModuleCtor(M);
  return PreservedAnalyses::none();
}

char ThreadSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ThreadSanitizerLegacyPass, "tsan",
                      "ThreadSanitizer: detects data races.", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ThreadSanitizerLegacyPass, "tsan",
                    "ThreadSanitizer: detects data races.", false, false)

StringRef ThreadSanitizerLegacyPass::getPassName() const {
  return "ThreadSanitizerLegacyPass";
}

void ThreadSanitizerLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

bool ThreadSanitizerLegacyPass::doInitialization(Module &M) {
  insertModuleCtor(M);
  TSan.emplace();
  return true;
}

bool ThreadSanitizerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TSan->sanitizeFunction(F, TLI);
  return true;
}

FunctionPass *llvm::createThreadSanitizerLegacyPassPass() {
  return new ThreadSanitizerLegacyPass();
}

void ThreadSanitizer::initialize(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());

  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                           Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getInt8PtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getInt8PtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName, Attr, Ty, PtrTy, OrdTy);

    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy);

    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] =
          M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy);
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, Attr, Ty, PtrTy, Ty,
                                             Ty, OrdTy, OrdTy);
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction("__tsan_atomic_thread_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);
  TsanAtomicSignalFence = M.getOrInsertFunction("__tsan_atomic_signal_fence",
                                                Attr, IRB.getVoidTy(), OrdTy);

  MemmoveFn =
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn =
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = Triple(M->getTargetTriple()).getObjectFormat();
      if (SectionName.endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }

    // Check if the global is private gcov data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant; such accesses
// are skipped. Currently handled:
//  - read-before-write (within the same BB, no calls between; see the
//    example below)
//  - variables that are not captured
//
// We do not handle patterns that should not survive the classic compiler
// optimizations anyway: e.g. two reads from the same temp should be
// eliminated by CSE, two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
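//
// Illustrative sketch of the read-before-write case (the names are made up):
//   %t = load i32, i32* %p
//   ...                          ; same BB, no calls in between
//   store i32 %v, i32* %p
// Here the load of %p is not instrumented: the later store to the same
// address is instrumented anyway, so a race on %p is still reported.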
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
    const DataLayout &DL) {
  SmallPtrSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      Value *Addr = Store->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
        continue;
      WriteTargets.insert(Addr);
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
        continue;
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any
        // writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  // TODO: Ask TTI whether synchronization scope is between threads.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}

bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions cannot have a prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  initialize(*F.getParent());
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses (such as being passed to an
  // instrumentation callback), and it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite
      ? cast<StoreInst>(I)->getAlignment()
      : cast<LoadInst>(I)->getAlignment();
  const bool IsVolatile =
      ClDistinguishVolatile && (IsWrite ? cast<StoreInst>(I)->isVolatile()
                                        : cast<LoadInst>(I)->isVolatile());
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
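  // Pick the callback. The access is treated as aligned if it uses the type's
  // ABI alignment (Alignment == 0), is at least 8-byte aligned, or is aligned
  // to a multiple of the access size; otherwise fall back to the
  // __tsan_unaligned_* variants (presumably the >= 8 case is acceptable
  // because the runtime's shadow granularity is 8 bytes).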
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
    if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}

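// Encode an LLVM atomic ordering as the integer constant passed to the tsan
// atomic callbacks. The encoding mirrors the C/C++11 memory_order enumeration
// (relaxed = 0, consume = 1, acquire = 2, release = 3, acq_rel = 4,
// seq_cst = 5); the unimplemented Consume case below reserves the value 1.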
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case AtomicOrdering::NotAtomic:
      llvm_unreachable("unexpected atomic ordering!");
    case AtomicOrdering::Unordered:              LLVM_FALLTHROUGH;
    case AtomicOrdering::Monotonic:              v = 0; break;
    // Not specified yet:
    // case AtomicOrdering::Consume:                v = 1; break;
    case AtomicOrdering::Acquire:                v = 2; break;
    case AtomicOrdering::Release:                v = 3; break;
    case AtomicOrdering::AcquireRelease:         v = 4; break;
    case AtomicOrdering::SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code generator, we will miss
// races on it. So, we either need to ensure the intrinsic is not inlined,
// or instrument it. We do not instrument memset/memmove/memcpy intrinsics
// (too complicated); instead we simply replace them with regular function
// calls, which are then intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
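//
// Rough sketch (operand casts omitted, assuming a 64-bit intptr):
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i1 false)
// is replaced with
//   call i8* @memset(i8* %p, i32 0, i64 %n)
// which the tsan runtime's memset interceptor then sees at run time.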
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on the C++11/C11
// standards.  For background, see the C++11 standard.  A slightly older,
// publicly available draft of the standard (not entirely up-to-date, but
// close enough for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

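// Illustrative sketch (see instrumentAtomic below for the exact logic):
//   %v = load atomic i32, i32* %p acquire, align 4
// is rewritten to
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)
// where 2 encodes memory_order_acquire (see createOrdering above).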
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
      IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
      IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
      IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

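// Map an access to the index into the per-size callback arrays above, i.e.
// log2 of the access size in bytes, or -1 for sizes the runtime has no
// callback for. E.g. a 32-bit access has a 4-byte store size, so the index
// is countTrailingZeros(4) == 2, selecting e.g. __tsan_read4/__tsan_write4.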
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}