//===-- tsan_shadow_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"

namespace __tsan {

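// Checks that shadow |s| decodes back to the given thread id, epoch, and
// access description (offset within the shadow cell, size, and access type).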
void CheckShadow(const Shadow *s, Sid sid, Epoch epoch, uptr addr, uptr size,
                 AccessType typ) {
  uptr addr1 = 0;
  uptr size1 = 0;
  AccessType typ1 = 0;
  s->GetAccess(&addr1, &size1, &typ1);
  CHECK_EQ(s->sid(), sid);
  CHECK_EQ(s->epoch(), epoch);
  CHECK_EQ(addr1, addr);
  CHECK_EQ(size1, size);
  CHECK_EQ(typ1, typ);
}

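// Exercises the FastState bit fields (sid/epoch/ignore bit) and the
// encode/decode round-trip for Shadow values.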
TEST(Shadow, Shadow) {
  Sid sid = static_cast<Sid>(11);
  Epoch epoch = static_cast<Epoch>(22);
  FastState fs;
  fs.SetSid(sid);
  fs.SetEpoch(epoch);
  CHECK_EQ(fs.sid(), sid);
  CHECK_EQ(fs.epoch(), epoch);
  CHECK_EQ(fs.GetIgnoreBit(), false);
  fs.SetIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), true);
  fs.ClearIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), false);

  Shadow s0(fs, 1, 2, kAccessWrite);
  CheckShadow(&s0, sid, epoch, 1, 2, kAccessWrite);
  Shadow s1(fs, 2, 3, kAccessRead);
  CheckShadow(&s1, sid, epoch, 2, 3, kAccessRead);
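  // Only the offset within the kShadowCell-byte cell is encoded in the
  // shadow, so the 0xfffff8 base below is expected to be dropped on decode.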
  Shadow s2(fs, 0xfffff8 + 4, 1, kAccessWrite | kAccessAtomic);
  CheckShadow(&s2, sid, epoch, 4, 1, kAccessWrite | kAccessAtomic);
  Shadow s3(fs, 0xfffff8 + 0, 8, kAccessRead | kAccessAtomic);
  CheckShadow(&s3, sid, epoch, 0, 8, kAccessRead | kAccessAtomic);

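  // IsBothReadsOrAtomic(typ) should hold only when the stored access and typ
  // are both reads or both atomic (such pairs never constitute a race).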
  CHECK(!s0.IsBothReadsOrAtomic(kAccessRead | kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s1.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s2.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s3.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s3.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s3.IsBothReadsOrAtomic(kAccessRead));

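  // The checks below pin down IsRWWeakerOrEqual(typ) as "the stored access is
  // no stronger than typ", with strength ordered
  // atomic read < atomic write < plain read < plain write.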
  CHECK(!s0.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s1.IsRWWeakerOrEqual(kAccessWrite));
  CHECK(s1.IsRWWeakerOrEqual(kAccessRead));
  CHECK(!s1.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));

  CHECK(!s2.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite));

  CHECK(s3.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite));

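  // The special read-only data marker should decode as a zero-sized read by
  // thread 0 at epoch 0.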
  Shadow sro(Shadow::kRodata);
  CheckShadow(&sro, static_cast<Sid>(0), kEpochZero, 0, 0, kAccessRead);
}

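// Checks that global, stack, and heap addresses are all classified as
// application memory, and that their shadow addresses land in shadow memory.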
TEST(Shadow, Mapping) {
  static int global;
  int stack;
  void *heap = malloc(0);
  free(heap);

  CHECK(IsAppMem((uptr)&global));
  CHECK(IsAppMem((uptr)&stack));
  CHECK(IsAppMem((uptr)heap));

  CHECK(IsShadowMem(MemToShadow((uptr)&global)));
  CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
  CHECK(IsShadowMem(MemToShadow((uptr)heap)));
}

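// Checks shadow cell granularity: every address within one kShadowCell-byte
// cell maps to the same shadow address, and adjacent cells map to shadow
// blocks that are kShadowCnt slots apart.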
TEST(Shadow, Celling) {
  u64 aligned_data[4];
  char *data = (char*)aligned_data;
  CHECK(IsAligned(reinterpret_cast<uptr>(data), kShadowSize));
  RawShadow *s0 = MemToShadow((uptr)&data[0]);
  CHECK(IsAligned(reinterpret_cast<uptr>(s0), kShadowSize));
  for (unsigned i = 1; i < kShadowCell; i++)
    CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
  for (unsigned i = kShadowCell; i < 2 * kShadowCell; i++)
    CHECK_EQ(s0 + kShadowCnt, MemToShadow((uptr)&data[i]));
  for (unsigned i = 2 * kShadowCell; i < 3 * kShadowCell; i++)
    CHECK_EQ(s0 + 2 * kShadowCnt, MemToShadow((uptr)&data[i]));
}

// Detect if the Mapping has a kBroken field.
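// This relies on SFINAE: the overload of broken() taking a
// Has<Mapping::kBroken>::Result parameter is viable only when Mapping
// declares kBroken; otherwise overload resolution falls back to the
// variadic version, which reports "not broken".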
template <uptr>
struct Has {
  typedef bool Result;
};

template <typename Mapping>
bool broken(...) {
  return false;
}

template <typename Mapping>
bool broken(uptr what, typename Has<Mapping::kBroken>::Result = false) {
  return Mapping::kBroken & what;
}

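// Instantiated for every platform memory layout via ForEachMapping() at the
// bottom of this file; checks the address transforms on each app range.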
struct MappingTest {
  template <typename Mapping>
  static void Apply() {
    // Easy (but ugly) way to print the mapping name.
    Printf("%s\n", __PRETTY_FUNCTION__);
    TestRegion<Mapping>(Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd);
    TestRegion<Mapping>(Mapping::kMidAppMemBeg, Mapping::kMidAppMemEnd);
    TestRegion<Mapping>(Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd);
    TestRegion<Mapping>(Mapping::kHeapMemBeg, Mapping::kHeapMemEnd);
  }

  template <typename Mapping>
  static void TestRegion(uptr beg, uptr end) {
    if (beg == end)
      return;
    Printf("checking region [0x%zx-0x%zx)\n", beg, end);
    uptr prev = 0;
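    // Walk the region in ~256 evenly spaced steps; at each step also probe
    // the addresses one shadow cell below and above to exercise cell
    // boundaries.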
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 256) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadowImpl::Apply<Mapping>(p);
        u32 *const m = MemToMetaImpl::Apply<Mapping>(p);
        const uptr r = ShadowToMemImpl::Apply<Mapping>(s);
        Printf("  addr=0x%zx: shadow=0x%zx meta=%p reverse=0x%zx\n", p, s, m,
               r);
        CHECK(IsAppMemImpl::Apply<Mapping>(p));
        if (!broken<Mapping>(kBrokenMapping))
          CHECK(IsShadowMemImpl::Apply<Mapping>(s));
        CHECK(IsMetaMemImpl::Apply<Mapping>(reinterpret_cast<uptr>(m)));
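        // CompressAddr/RestoreAddr must round-trip for application addresses.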
        CHECK_EQ(p, RestoreAddrImpl::Apply<Mapping>(CompressAddr(p)));
        if (!broken<Mapping>(kBrokenReverseMapping))
          CHECK_EQ(p, r);
        if (prev && !broken<Mapping>(kBrokenLinearity)) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadowImpl::Apply<Mapping>(prev);
          u32 *const prev_m = MemToMetaImpl::Apply<Mapping>(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
};

TEST(Shadow, AllMappings) { ForEachMapping<MappingTest>(); }

}  // namespace __tsan