//===-- tsan_shadow_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"

namespace __tsan {

void CheckShadow(const Shadow *s, Sid sid, Epoch epoch, uptr addr, uptr size,
                 AccessType typ) {
  uptr addr1 = 0;
  uptr size1 = 0;
  AccessType typ1 = 0;
  s->GetAccess(&addr1, &size1, &typ1);
  CHECK_EQ(s->sid(), sid);
  CHECK_EQ(s->epoch(), epoch);
  CHECK_EQ(addr1, addr);
  CHECK_EQ(size1, size);
  CHECK_EQ(typ1, typ);
}

TEST(Shadow, Shadow) {
  Sid sid = static_cast<Sid>(11);
  Epoch epoch = static_cast<Epoch>(22);
  FastState fs;
  fs.SetSid(sid);
  fs.SetEpoch(epoch);
  CHECK_EQ(fs.sid(), sid);
  CHECK_EQ(fs.epoch(), epoch);
  CHECK_EQ(fs.GetIgnoreBit(), false);
  fs.SetIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), true);
  fs.ClearIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), false);

  Shadow s0(fs, 1, 2, kAccessWrite);
  CheckShadow(&s0, sid, epoch, 1, 2, kAccessWrite);
  Shadow s1(fs, 2, 3, kAccessRead);
  CheckShadow(&s1, sid, epoch, 2, 3, kAccessRead);
  Shadow s2(fs, 0xfffff8 + 4, 1, kAccessWrite | kAccessAtomic);
  CheckShadow(&s2, sid, epoch, 4, 1, kAccessWrite | kAccessAtomic);
  Shadow s3(fs, 0xfffff8 + 0, 8, kAccessRead | kAccessAtomic);
  CheckShadow(&s3, sid, epoch, 0, 8, kAccessRead | kAccessAtomic);

  CHECK(!s0.IsBothReadsOrAtomic(kAccessRead | kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s1.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s2.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s3.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s3.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s3.IsBothReadsOrAtomic(kAccessRead));

  CHECK(!s0.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s1.IsRWWeakerOrEqual(kAccessWrite));
  CHECK(s1.IsRWWeakerOrEqual(kAccessRead));
  CHECK(!s1.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));

  CHECK(!s2.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite));

  CHECK(s3.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite));

  Shadow sro(Shadow::kRodata);
  CheckShadow(&sro, static_cast<Sid>(0), kEpochZero, 0, 0, kAccessRead);
}

TEST(Shadow, Mapping) {
  static int global;
  int stack;
  void *heap = malloc(0);
  free(heap);

  CHECK(IsAppMem((uptr)&global));
  CHECK(IsAppMem((uptr)&stack));
  CHECK(IsAppMem((uptr)heap));

  CHECK(IsShadowMem(MemToShadow((uptr)&global)));
  CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
  CHECK(IsShadowMem(MemToShadow((uptr)heap)));
}

TEST(Shadow, Celling) {
  u64 aligned_data[4];
  char *data = (char *)aligned_data;
  CHECK(IsAligned(reinterpret_cast<uptr>(data), kShadowSize));
  RawShadow *s0 = MemToShadow((uptr)&data[0]);
  CHECK(IsAligned(reinterpret_cast<uptr>(s0), kShadowSize));
  for (unsigned i = 1; i < kShadowCell; i++)
    CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
  for (unsigned i = kShadowCell; i < 2 * kShadowCell; i++)
    CHECK_EQ(s0 + kShadowCnt, MemToShadow((uptr)&data[i]));
  for (unsigned i = 2 * kShadowCell; i < 3 * kShadowCell; i++)
    CHECK_EQ(s0 + 2 * kShadowCnt, MemToShadow((uptr)&data[i]));
}
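// The `broken` overload pair below is a classic SFINAE detector: when Mapping
// defines kBroken, the second overload (whose defaulted parameter names
// Has<Mapping::kBroken>::Result) is viable and wins overload resolution for a
// uptr argument; when the substitution fails, the variadic fallback is chosen
// and reports false. A minimal sketch of the same idea, with illustrative
// types that are not part of TSan:
//
//   struct Flagged { static const uptr kBroken = 1; };
//   struct Clean {};
//   // broken<Flagged>(1) -> true  (second overload reads Flagged::kBroken)
//   // broken<Clean>(1)   -> false (substitution fails; variadic fallback)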
// Detect if the Mapping has a kBroken field.
template <uptr>
struct Has {
  typedef bool Result;
};

template <typename Mapping>
bool broken(...) {
  return false;
}

template <typename Mapping>
bool broken(uptr what, typename Has<Mapping::kBroken>::Result = false) {
  return Mapping::kBroken & what;
}

struct MappingTest {
  template <typename Mapping>
  static void Apply() {
    // Easy (but ugly) way to print the mapping name.
    Printf("%s\n", __PRETTY_FUNCTION__);
    TestRegion<Mapping>(Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd);
    TestRegion<Mapping>(Mapping::kMidAppMemBeg, Mapping::kMidAppMemEnd);
    TestRegion<Mapping>(Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd);
    TestRegion<Mapping>(Mapping::kHeapMemBeg, Mapping::kHeapMemEnd);
  }

  template <typename Mapping>
  static void TestRegion(uptr beg, uptr end) {
    if (beg == end)
      return;
    Printf("checking region [0x%zx-0x%zx)\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 256) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell;
           x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadowImpl::Apply<Mapping>(p);
        u32 *const m = MemToMetaImpl::Apply<Mapping>(p);
        const uptr r = ShadowToMemImpl::Apply<Mapping>(s);
        Printf("  addr=0x%zx: shadow=0x%zx meta=%p reverse=0x%zx\n", p, s, m,
               r);
        CHECK(IsAppMemImpl::Apply<Mapping>(p));
        if (!broken<Mapping>(kBrokenMapping))
          CHECK(IsShadowMemImpl::Apply<Mapping>(s));
        CHECK(IsMetaMemImpl::Apply<Mapping>(reinterpret_cast<uptr>(m)));
        CHECK_EQ(p, RestoreAddrImpl::Apply<Mapping>(CompressAddr(p)));
        if (!broken<Mapping>(kBrokenReverseMapping))
          CHECK_EQ(p, r);
        if (prev && !broken<Mapping>(kBrokenLinearity)) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadowImpl::Apply<Mapping>(prev);
          u32 *const prev_m = MemToMetaImpl::Apply<Mapping>(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
};

TEST(Shadow, AllMappings) { ForEachMapping<MappingTest>(); }

}  // namespace __tsan