//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

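// memset is declared directly rather than via <string.h>: the sanitizer
// runtime avoids pulling in libc headers.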
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
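// Per-architecture cap on a single allocation; larger requests are rejected
// up front (see Allocate() and Reallocate() below) instead of being passed
// through to the underlying allocator.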
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif

static Allocator allocator;

static uptr max_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void AllocatorThreadFinish() {
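  // Return the dying thread's cached chunks to the global pools so they can
  // be reused by other threads.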
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
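  // 'allocated' is the first byte of ChunkMetadata, so this store flips the
  // flag atomically and publishes the chunk to the leak scanner only after
  // the fields above have been fully initialized.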
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
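  // Clear the 'allocated' flag so the scanner stops treating the chunk as a
  // live heap object before the allocator hands the memory out again.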
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
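  // The sanitizer_common report function below is NORETURN: it prints the
  // error and terminates the process, so no return statement is needed here.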
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
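  // Treat malloc(0) as a one-byte request so that it returns a unique,
  // freeable pointer.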
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
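  // Chunks from the secondary allocator come directly from freshly mmap'ed
  // pages and are therefore already zeroed; only primary chunks need an
  // explicit memset.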
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
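  // The overflow check above guarantees this multiplication cannot wrap.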
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  if (new_size > max_malloc_size) {
    ReportAllocationSizeTooBig(new_size, stack);
    return nullptr;
  }
  RegisterDeallocation(p);
  void *new_p =
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
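  // On failure the old chunk is left intact (standard realloc semantics), so
  // it must be re-registered; the exception is new_size == 0, which is a
  // free and must not be re-registered.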
  if (new_p)
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
  return new_p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
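  // LSan records the size originally requested by the user, not the
  // (possibly larger) size of the underlying chunk.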
  return m->requested_size;
}

int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
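  // The overflow check above guarantees nmemb * size cannot wrap.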
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
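  // 'new T[0]' for a T with a destructor stores a pointer just past the array
  // cookie, i.e. at the chunk end; such pointers must still keep the chunk
  // alive (see IsSpecialCaseOfOperatorNew0() in lsan_common.h).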
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

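// The allocator must be locked (see LockAllocator()) when this is called.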
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
  // This function can be used to treat memory reachable from `tctx` as live.
  // This is useful for threads that have been created but not yet started.

  // This is currently a no-op because the LSan `pthread_create()` interceptor
  // blocks until the child thread starts, which keeps the thread's `arg`
  // pointer live.
}

} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

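// LSan does not track free or unmapped byte counts; the next two entry points
// are stubs kept only for allocator-interface compatibility.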
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

} // extern "C"