//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#  if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#  endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#  if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#  endif
}
#endif

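// Allocates a trace part for the calling thread. Once the thread has reached
// its per-thread cap (Trace::kMinParts + flags()->history_size), or other
// threads have finished with excess parts, we first try to reuse a part from
// ctx->trace_part_recycle; if nothing can be recycled, a fresh part is
// mmap'ed.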
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

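// Returns a finished part to the global recycle list. The part is detached
// from its owning trace; the caller must hold ctx->slot_mtx.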
static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

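// Starts a new global epoch: recycles the threads' finished trace parts,
// resets all slots and the slot queue, wipes the shadow memory and resets
// the clocks in the meta shadow. Called with all slot mutexes held
// (see DoReset below).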
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tids can still be stored in some aux places
    // (e.g. the tid of the thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

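// Finds a slot for the thread to attach to and returns it locked.
// Slots are taken from the round-robin slot queue; if all remaining slots
// are exhausted (their epoch reached kEpochLast), a global reset (DoReset)
// is performed and the search is retried. If the chosen slot is currently
// owned by another thread, that thread is preempted and detached from it.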
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

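// Attaches the thread to a slot: bumps the slot epoch, copies sid/epoch into
// the thread's fast state and vector clock, and records the attachment in the
// slot journal. If a global reset happened since the thread last held a slot,
// the thread's clock is reset as well. Returns with slot->mtx held.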
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that the
        // thread was preempted right after it acquired the slot in ThreadStart
        // and has not traced any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

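// The singleton Context is placement-new'ed into the static ctx_placeholder
// in Initialize(). The constructor seeds the slot queue with all static
// TidSlots and starts the global epoch at 1.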
Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses a fixed-size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses a malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

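// Opens the destination for periodic memory profiles based on the
// profile_memory flag (set via TSAN_OPTIONS, e.g. profile_memory=stdout):
// "stdout"/"stderr" select the standard streams, any other value is treated
// as a file name to which ".<pid>" is appended.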
static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

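// Releases the shadow pages backing the range [addr, addr + size) to the OS.
// The mapping itself stays intact, so subsequent accesses simply fault in
// zero pages again.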
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to tell whether the values were sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try not to
// crash as the failure mode is very confusing (a page fault inside the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (ctx->mapped_shadow_begin < shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping the contiguously growing heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well,
  // but on the other hand there is no point in processing interceptors,
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

  __tsan_test_only_on_fork();
}

static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlock();
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

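// Captures the current call stack (optionally extended with pc on top) and
// stores it in the stack depot, returning its StackID.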
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvent's.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may not be queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock in the beginning
    // of the function and the slot was at kEpochLast - 1, so after the
    // increment in SlotAttachAndLock it becomes kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

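// Ignore-begin/end calls are counted, so they can nest; memory accesses are
// ignored while the counter is non-zero (the ignore bit in fast_state).
// The stack of each Begin call is remembered in mop_ignore_set so it can be
// shown in reports, e.g. when a thread exits with ignores still enabled.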
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif