//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

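// memset is declared directly (rather than via <string.h>), as sanitizer
// runtime code avoids depending on libc headers.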
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
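// Cap on the size of a single allocation, chosen per target architecture.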
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif

static Allocator allocator;

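// Effective per-allocation size limit; set in InitializeAllocator() from the
// max_allocation_size_mb flag and capped at kMaxAllowedMallocSize.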
static uptr max_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

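// Returns the out-of-line metadata block the allocator keeps for chunk p.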
static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

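// Records the tag, stack trace id and requested size in the chunk's metadata,
// then publishes the chunk as allocated via an atomic store on the metadata's
// first byte.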
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

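// Clears the 'allocated' flag so the leak checker stops treating the chunk as
// live before it is handed back to the allocator.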
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}

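// Common allocation path: enforces the size cap and the RSS limit, zeroes
// primary-allocator memory when requested, and registers the chunk for leak
// checking.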
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

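// De-registers the old chunk and registers the result. If the underlying
// reallocation fails and new_size is non-zero, the original chunk is
// re-registered so it remains tracked.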
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  if (new_size > max_malloc_size) {
    ReportAllocationSizeTooBig(new_size, stack);
    return nullptr;
  }
  RegisterDeallocation(p);
  void *new_p =
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  if (new_p)
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
  return new_p;
}

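// Address range occupied by the current thread's allocator cache.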
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

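// Returns the size originally requested by the user, not the (possibly
// larger) size of the underlying chunk.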
uptr GetMallocUsableSize(const void *p) {
  if (!p)
    return 0;
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

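// The lsan_* entry points below implement the libc-style allocation API on
// top of Allocate/Deallocate, translating failures into the appropriate
// errno values.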
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

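// If p points into a live chunk, returns the chunk's start address;
// returns 0 otherwise.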
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

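// User memory begins at the chunk start; metadata is stored out of line by
// the allocator.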
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

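// Marks the chunk containing p as kIgnored so it is never reported as a leak.
// Fails if p does not point into the user area of a live chunk.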
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
  // This function can be used to treat memory reachable from `tctx` as live.
  // This is useful for threads that have been created but not yet started.

  // This is currently a no-op because the LSan `pthread_create()` interceptor
  // blocks until the child thread starts, which keeps the thread's `arg`
  // pointer live.
}

} // namespace __lsan

using namespace __lsan;

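// Exported __sanitizer_* allocator introspection interface.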
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

} // extern "C"