//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

# include <dlfcn.h>
# include <elf.h>
# include <errno.h>
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/prctl.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <unistd.h>
# include <unwind.h>

# include "hwasan.h"
# include "hwasan_dynamic_shadow.h"
# include "hwasan_interface_internal.h"
# include "hwasan_mapping.h"
# include "hwasan_report.h"
# include "hwasan_thread.h"
# include "hwasan_thread_list.h"
# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Downstream integration tests exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
# if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
# endif

namespace __hwasan {

// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
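  // ORing in the low bits rounds max_address up so that the whole range ends
  // on a (GetMmapGranularity() << kShadowScale) boundary. For example, with
  // 4 KiB pages and kShadowScale == 4 (16 bytes of memory per byte of
  // shadow), the mask is 0xFFFF, so HighMem ends on a 64 KiB boundary and
  // its shadow stays page-aligned.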
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

void InitializeOsSupport() {
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
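  // Note: the PR_* values mirror <linux/prctl.h>; the ARCH_* values come from
  // the not-yet-merged Intel LAM arch_prctl patch referenced below, so they
  // are defined locally rather than taken from kernel headers.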
  // Check we're running on a kernel that can use the tagged address ABI.
  int local_errno = 0;
  bool has_abi;
# if defined(__x86_64__)
  has_abi = (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK, 0),
                              &local_errno) &&
             local_errno == EINVAL);
# else
  has_abi =
      (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
                        &local_errno) &&
       local_errno == EINVAL);
# endif
  if (has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around, we assume this is the
    // case.
    return;
# else
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: "
          "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
      Die();
    }
# endif
  }

  // Turn on the tagged address ABI.
  if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                       PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
       !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
# if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    // Try the new prctl API for Intel LAM. The API is based on a currently
    // unsubmitted patch to the Linux kernel (as of July 2022) and is thus
    // subject to change. Patch is here:
    // https://lore.kernel.org/linux-mm/[email protected]/
    if (!internal_iserror(
            internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits))) {
      return;
    }
# endif // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    if (flags()->fail_without_syscall_abi) {
      Printf(
166 "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
167 "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
168 "configuration.\n");
      Die();
    }
  }
# undef PR_SET_TAGGED_ADDR_CTRL
# undef PR_GET_TAGGED_ADDR_CTRL
# undef PR_TAGGED_ADDR_ENABLE
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
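  // This sets aside (1ULL << kShadowBaseAlignment) bytes directly below the
  // shadow base (4 GiB if kShadowBaseAlignment is 32) for per-thread state,
  // separated from the shadow itself by one guard page.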
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
  // Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
  CHECK(GetTagFromPointer(p) == 0);
# endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that a signal handler cannot see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

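// Note: pthread runs TSD destructors in several passes (up to
// GetPthreadDestructorIterations()). Seeding the slot with the iteration
// count above and re-arming it below on every pass makes our destructor run
// on the last pass, after other destructors that may still use the hwasan
// thread state.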
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
# endif

# if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
# else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
# endif

# if SANITIZER_ANDROID
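// Check that TLS_SLOT_SANITIZER is not shared with dlerror(): stash a magic
// value in the slot, force a dlerror() call, and verify that the value
// survived.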
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
# else
void AndroidTestTlsSlot() {}
# endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store and 0 for load, and the X&2 bit is set
  // if the error is recoverable. Valid values of Y are 0 to 4, which are
  // interpreted as log2(access_size), and 0xF, which means that the access
  // size is passed via a platform dependent register (see below).
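  // For example, 0x14 encodes a non-recoverable 16-byte store (X == 1,
  // Y == 4, so size is 1 << 4), while 0x2F encodes a recoverable load whose
  // size is passed in the platform dependent register.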
# if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
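  // A BRK instruction carries its 16-bit immediate in bits [20:5], so the
  // shift-and-mask below extracts the immediate before checking that its top
  // byte is 0x9.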
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{}; // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

# elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
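  // That is, the instrumentation emits the bytes 0F 1F 40 (0x40 + 0xXY),
  // i.e. "nopl 0x40+0xXY(%rax)"; the 0x40 bias (checked below) keeps the
  // displacement byte distinguishable, and the low six bits carry 0xXY.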
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{}; // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

# else
# error Unsupported architecture
# endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

# if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
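  // Nothing to do: RIP already points at the NOP following INT3, and
  // executing that NOP on return from the handler is harmless.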
# else
# error Unsupported architecture
# endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);
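  // With kShadowScale == 4 this is size >> 4, e.g. retagging 1 MiB of memory
  // touches 64 KiB of shadow.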

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page
    // on Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

void HwasanInstallAtForkHandler() {
  auto before = []() {
    HwasanAllocatorLock();
    StackDepotLockAll();
  };
  auto after = []() {
    StackDepotUnlockAll();
    HwasanAllocatorUnlock();
  };
  pthread_atfork(before, after, after);
}

} // namespace __hwasan

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD