13d4bba30STeresa Johnson //===-- memprof_allocator.cpp --------------------------------------------===//
23d4bba30STeresa Johnson //
33d4bba30STeresa Johnson // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
43d4bba30STeresa Johnson // See https://llvm.org/LICENSE.txt for license information.
53d4bba30STeresa Johnson // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
63d4bba30STeresa Johnson //
73d4bba30STeresa Johnson //===----------------------------------------------------------------------===//
83d4bba30STeresa Johnson //
93d4bba30STeresa Johnson // This file is a part of MemProfiler, a memory profiler.
103d4bba30STeresa Johnson //
113d4bba30STeresa Johnson // Implementation of MemProf's memory allocator, which uses the allocator
123d4bba30STeresa Johnson // from sanitizer_common.
133d4bba30STeresa Johnson //
143d4bba30STeresa Johnson //===----------------------------------------------------------------------===//
153d4bba30STeresa Johnson
163d4bba30STeresa Johnson #include "memprof_allocator.h"
173d4bba30STeresa Johnson #include "memprof_mapping.h"
181243cef2SSnehasish Kumar #include "memprof_mibmap.h"
19545866cbSSnehasish Kumar #include "memprof_rawprofile.h"
203d4bba30STeresa Johnson #include "memprof_stack.h"
213d4bba30STeresa Johnson #include "memprof_thread.h"
228306968bSSnehasish Kumar #include "profile/MemProfData.inc"
233d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_allocator_checks.h"
243d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_allocator_interface.h"
253d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_allocator_report.h"
263d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_errno.h"
273d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_file.h"
283d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_flags.h"
293d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_internal_defs.h"
303d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_list.h"
31545866cbSSnehasish Kumar #include "sanitizer_common/sanitizer_procmaps.h"
323d4bba30STeresa Johnson #include "sanitizer_common/sanitizer_stackdepot.h"
33545866cbSSnehasish Kumar #include "sanitizer_common/sanitizer_vector.h"
343d4bba30STeresa Johnson
353d4bba30STeresa Johnson #include <sched.h>
36ed4fbe6dSPetr Hosek #include <time.h>
373d4bba30STeresa Johnson
383d4bba30STeresa Johnson namespace __memprof {
398306968bSSnehasish Kumar namespace {
408306968bSSnehasish Kumar using ::llvm::memprof::MemInfoBlock;
418306968bSSnehasish Kumar
// Prints one MemInfoBlock aggregated under stack-depot id `id`, either as a
// single terse "MIB:" line or as a multi-line human-readable report.
// NOTE: the exact field order and separators of this output are parsed by
// downstream consumers/tests; do not reorder fields.
void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
  u64 p;

  if (print_terse) {
    // Averages are rendered with two decimal places via integer math:
    // p = total * 100 / count, printed as p/100 "." p%100.
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100, p % 100,
           M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
           M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
           M.MaxLifetime);
    Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
           M.NumSameAllocCpu, M.NumSameDeallocCpu);
  } else {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("Memory allocation stack id = %llu\n", id);
    Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
           M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
           p % 100, M.MinAccessCount, M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
           p % 100, M.MinLifetime, M.MaxLifetime);
    Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
           "cpu: %u, num same dealloc_cpu: %u\n",
           M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
           M.NumSameDeallocCpu);
  }
}
748306968bSSnehasish Kumar } // namespace
753d4bba30STeresa Johnson
GetCpuId(void)763d4bba30STeresa Johnson static int GetCpuId(void) {
773d4bba30STeresa Johnson // _memprof_preinit is called via the preinit_array, which subsequently calls
783d4bba30STeresa Johnson // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
793d4bba30STeresa Johnson // will seg fault as the address of __vdso_getcpu will be null.
803d4bba30STeresa Johnson if (!memprof_init_done)
813d4bba30STeresa Johnson return -1;
823d4bba30STeresa Johnson return sched_getcpu();
833d4bba30STeresa Johnson }
843d4bba30STeresa Johnson
853d4bba30STeresa Johnson // Compute the timestamp in ms.
GetTimestamp(void)863d4bba30STeresa Johnson static int GetTimestamp(void) {
873d4bba30STeresa Johnson // timespec_get will segfault if called from dl_init
883d4bba30STeresa Johnson if (!memprof_timestamp_inited) {
893d4bba30STeresa Johnson // By returning 0, this will be effectively treated as being
903d4bba30STeresa Johnson // timestamped at memprof init time (when memprof_init_timestamp_s
913d4bba30STeresa Johnson // is initialized).
923d4bba30STeresa Johnson return 0;
933d4bba30STeresa Johnson }
943d4bba30STeresa Johnson timespec ts;
95d7e71b5dSJeroen Dobbelaere clock_gettime(CLOCK_REALTIME, &ts);
963d4bba30STeresa Johnson return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
973d4bba30STeresa Johnson }
983d4bba30STeresa Johnson
993d4bba30STeresa Johnson static MemprofAllocator &get_allocator();
1003d4bba30STeresa Johnson
1013d4bba30STeresa Johnson // The memory chunk allocated from the underlying allocator looks like this:
1023d4bba30STeresa Johnson // H H U U U U U U
1033d4bba30STeresa Johnson // H -- ChunkHeader (32 bytes)
1043d4bba30STeresa Johnson // U -- user memory.
1053d4bba30STeresa Johnson
1063d4bba30STeresa Johnson // If there is left padding before the ChunkHeader (due to use of memalign),
1073d4bba30STeresa Johnson // we store a magic value in the first uptr word of the memory block and
1083d4bba30STeresa Johnson // store the address of ChunkHeader in the next uptr.
1093d4bba30STeresa Johnson // M B L L L L L L L L L H H U U U U U U
1103d4bba30STeresa Johnson // | ^
1113d4bba30STeresa Johnson // ---------------------|
1123d4bba30STeresa Johnson // M -- magic value kAllocBegMagic
1133d4bba30STeresa Johnson // B -- address of ChunkHeader pointing to the first 'H'
1143d4bba30STeresa Johnson
1153d4bba30STeresa Johnson constexpr uptr kMaxAllowedMallocBits = 40;
1163d4bba30STeresa Johnson
1173d4bba30STeresa Johnson // Should be no more than 32-bytes
// Per-allocation metadata stored immediately before the user region. Must
// stay exactly 32 bytes (see the COMPILER_CHECK below) so the layout
// diagrams above hold.
struct ChunkHeader {
  // 1-st 4 bytes.
  // Stack depot id of the allocation call stack (set via StackDepotPut).
  u32 alloc_context_id;
  // 2-nd 4 bytes
  // CPU this chunk was allocated on (GetCpuId; may be (u32)-1 pre-init).
  u32 cpu_id;
  // 3-rd 4 bytes
  // Allocation time in ms relative to memprof init (GetTimestamp).
  u32 timestamp_ms;
  // 4-th 4 bytes
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5-th and 6-th 4 bytes
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future for
  // more fields.
  // Reset to 0 on deallocation; a zero value therefore marks a freed chunk.
  atomic_uint64_t user_requested_size;
  // 23 bits available
  // 7-th and 8-th 4 bytes
  u64 data_type_id; // TODO: hash of type name
};
1383d4bba30STeresa Johnson
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
// The chunk layout assumes exactly 32 header bytes; fail the build if
// ChunkHeader ever changes size.
COMPILER_CHECK(kChunkHeaderSize == 32);
1413d4bba30STeresa Johnson
// A chunk viewed through its header; provides access to the user region and
// the underlying allocator block.
struct MemprofChunk : ChunkHeader {
  // First byte of the user-visible memory.
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  // Size the user requested; 0 once the chunk has been deallocated.
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  // Start of the allocator block this chunk lives in. Differs from `this`
  // when the chunk came from memalign and has left padding (from_memalign).
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};
1533d4bba30STeresa Johnson
// Stored at the very start of a memalign'ed block to record where the real
// MemprofChunk header lives (see the M/B layout diagram above). The magic
// value distinguishes a published header from arbitrary block contents.
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  // Returns the recorded chunk header, or nullptr if the magic is not set.
  // The acquire load pairs with the release store in Set() so chunk_header
  // is fully visible when the magic is observed.
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  // p != nullptr publishes the header (pointer first, then magic with
  // release). p == nullptr clears the magic on deallocation and CHECKs that
  // it was previously set.
  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
1813d4bba30STeresa Johnson
// Releases to the OS the shadow pages backing the user range [p, p+size).
void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may be
  // not page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
1873d4bba30STeresa Johnson
OnMap(uptr p,uptr size) const1883d4bba30STeresa Johnson void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
1893d4bba30STeresa Johnson // Statistics.
1903d4bba30STeresa Johnson MemprofStats &thread_stats = GetCurrentThreadStats();
1913d4bba30STeresa Johnson thread_stats.mmaps++;
1923d4bba30STeresa Johnson thread_stats.mmaped += size;
1933d4bba30STeresa Johnson }
OnUnmap(uptr p,uptr size) const1943d4bba30STeresa Johnson void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
1953d4bba30STeresa Johnson // We are about to unmap a chunk of user memory.
1963d4bba30STeresa Johnson // Mark the corresponding shadow memory as not needed.
1973d4bba30STeresa Johnson FlushUnneededMemProfShadowMemory(p, size);
1983d4bba30STeresa Johnson // Statistics.
1993d4bba30STeresa Johnson MemprofStats &thread_stats = GetCurrentThreadStats();
2003d4bba30STeresa Johnson thread_stats.munmaps++;
2013d4bba30STeresa Johnson thread_stats.munmaped += size;
2023d4bba30STeresa Johnson }
2033d4bba30STeresa Johnson
GetAllocatorCache(MemprofThreadLocalMallocStorage * ms)2043d4bba30STeresa Johnson AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
2053d4bba30STeresa Johnson CHECK(ms);
2063d4bba30STeresa Johnson return &ms->allocator_cache;
2073d4bba30STeresa Johnson }
2083d4bba30STeresa Johnson
2093d4bba30STeresa Johnson // Accumulates the access count from the shadow for the given pointer and size.
GetShadowCount(uptr p,u32 size)2103d4bba30STeresa Johnson u64 GetShadowCount(uptr p, u32 size) {
2113d4bba30STeresa Johnson u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
2123d4bba30STeresa Johnson u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
2133d4bba30STeresa Johnson u64 count = 0;
2143d4bba30STeresa Johnson for (; shadow <= shadow_end; shadow++)
2153d4bba30STeresa Johnson count += *shadow;
2163d4bba30STeresa Johnson return count;
2173d4bba30STeresa Johnson }
2183d4bba30STeresa Johnson
// Clears the shadow counters (when memory is allocated).
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  uptr shadow_beg = MEM_TO_SHADOW(addr);
  uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  // Small ranges: a plain memset is cheapest. Large ranges: remap the
  // page-aligned middle (fresh zero pages from the OS) and memset only the
  // unaligned edges.
  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      // The range does not cover a single whole page; memset it all.
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      // Zero the partial-page edges by hand...
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      // ...and re-reserve the aligned middle, which yields zeroed pages.
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}
2483d4bba30STeresa Johnson
struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;

  MemprofAllocator allocator;
  // Guards fallback_allocator_cache, used when a thread has no TLS cache.
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  // Effective per-allocation size cap: flag-controlled if set, otherwise
  // kMaxAllowedMallocSize (see InitLinkerInitialized).
  uptr max_user_defined_malloc_size;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  // Lifecycle flags; Deallocate only records MIBs while constructed == 1
  // and destructing == 0.
  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  // Cached copy of flags()->print_text, captured at construction.
  bool print_text;
2643d4bba30STeresa Johnson
2653d4bba30STeresa Johnson // ------------------- Initialization ------------------------
// ------------------- Initialization ------------------------
// Marks the allocator as live. Deallocate consults `constructed` /
// `destructing` before touching MIBMap.
explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
  atomic_store_relaxed(&destructing, 0);
  atomic_store_relaxed(&constructed, 1);
}
270545866cbSSnehasish Kumar
// Sets `destructing` first so concurrent Deallocate calls stop recording
// into MIBMap, then dumps the accumulated profile.
~Allocator() {
  atomic_store_relaxed(&destructing, 1);
  FinishAndWrite();
}
2753d4bba30STeresa Johnson
PrintCallback__memprof::Allocator2761243cef2SSnehasish Kumar static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
2771243cef2SSnehasish Kumar void *Arg) {
2781243cef2SSnehasish Kumar SpinMutexLock(&Value->mutex);
2798306968bSSnehasish Kumar Print(Value->mib, Key, bool(Arg));
2801243cef2SSnehasish Kumar }
2811243cef2SSnehasish Kumar
// Dumps all recorded MIBs — including blocks still live at this point — as
// text or as a raw binary profile depending on the print_text flag. Called
// from the destructor.
void FinishAndWrite() {
  if (print_text && common_flags()->print_module_map)
    DumpProcessMap();

  // Hold the allocator lock while walking live chunks and the MIB map;
  // InsertLiveBlocks relies on this.
  allocator.ForceLock();

  InsertLiveBlocks();
  if (print_text) {
    if (!flags()->print_terse)
      Printf("Recorded MIBs (incl. live on exit):\n");
    // print_terse is forwarded to PrintCallback through ForEach's void*.
    MIBMap.ForEach(PrintCallback,
                   reinterpret_cast<void *>(flags()->print_terse));
    StackDepotPrintAll();
  } else {
    // Serialize the contents to a raw profile. Format documented in
    // memprof_rawprofile.h.
    char *Buffer = nullptr;

    MemoryMappingLayout Layout(/*cache_enabled=*/true);
    u64 BytesSerialized = SerializeToRawProfile(MIBMap, Layout, Buffer);
    CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
    report_file.Write(Buffer, BytesSerialized);
  }

  allocator.ForceUnlock();
}
308545866cbSSnehasish Kumar
309545866cbSSnehasish Kumar // Inserts any blocks which have been allocated but not yet deallocated.
InsertLiveBlocks__memprof::Allocator310545866cbSSnehasish Kumar void InsertLiveBlocks() {
3113d4bba30STeresa Johnson allocator.ForEachChunk(
3123d4bba30STeresa Johnson [](uptr chunk, void *alloc) {
3133d4bba30STeresa Johnson u64 user_requested_size;
3141243cef2SSnehasish Kumar Allocator *A = (Allocator *)alloc;
3153d4bba30STeresa Johnson MemprofChunk *m =
3161243cef2SSnehasish Kumar A->GetMemprofChunk((void *)chunk, user_requested_size);
3173d4bba30STeresa Johnson if (!m)
3183d4bba30STeresa Johnson return;
3193d4bba30STeresa Johnson uptr user_beg = ((uptr)m) + kChunkHeaderSize;
3203d4bba30STeresa Johnson u64 c = GetShadowCount(user_beg, user_requested_size);
3213d4bba30STeresa Johnson long curtime = GetTimestamp();
3223d4bba30STeresa Johnson MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
3233d4bba30STeresa Johnson m->cpu_id, GetCpuId());
3241243cef2SSnehasish Kumar InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
3253d4bba30STeresa Johnson },
3263d4bba30STeresa Johnson this);
3273d4bba30STeresa Johnson }
3283d4bba30STeresa Johnson
InitLinkerInitialized__memprof::Allocator3293d4bba30STeresa Johnson void InitLinkerInitialized() {
3303d4bba30STeresa Johnson SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
3313d4bba30STeresa Johnson allocator.InitLinkerInitialized(
3323d4bba30STeresa Johnson common_flags()->allocator_release_to_os_interval_ms);
3333d4bba30STeresa Johnson max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
3343d4bba30STeresa Johnson ? common_flags()->max_allocation_size_mb
3353d4bba30STeresa Johnson << 20
3363d4bba30STeresa Johnson : kMaxAllowedMallocSize;
3373d4bba30STeresa Johnson }
3383d4bba30STeresa Johnson
3393d4bba30STeresa Johnson // -------------------- Allocation/Deallocation routines ---------------
// Central allocation routine: returns `size` bytes aligned to at least
// `alignment`, stamps the chunk header with cpu/timestamp/stack id, and
// clears the shadow counters over the new user region. Returns nullptr (or
// reports fatally, per allocator_may_return_null) on failure.
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
               AllocType alloc_type) {
  if (UNLIKELY(!memprof_inited))
    MemprofInitFromRtl();
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }
  CHECK(stack);
  const uptr min_alignment = MEMPROF_ALIGNMENT;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc
    // would not return NULL even for zero-size allocations. Moreover, it
    // looks like operator new should never return NULL, and results of
    // consecutive "new" calls must be different even if the allocated size
    // is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rounded_size = RoundUpTo(size, alignment);
  uptr needed_size = rounded_size + kChunkHeaderSize;
  // Over-allocate so user_beg can be slid up to the requested alignment.
  if (alignment > min_alignment)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
      size > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    ReportAllocationSizeTooBig(size, malloc_limit, stack);
  }

  MemprofThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8);
  } else {
    // No thread state yet; use the shared fallback cache under its mutex.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }

  // Carve user_beg out of the block, aligning it up if needed; the chunk
  // header sits immediately before user_beg.
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
  uptr user_beg = beg_plus_header;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
  // Nonzero left padding means the header was shifted (memalign layout).
  m->from_memalign = alloc_beg != chunk_beg;
  CHECK(size);

  m->cpu_id = GetCpuId();
  m->timestamp_ms = GetTimestamp();
  m->alloc_context_id = StackDepotPut(*stack);

  // Reset shadow access counters for the granularity-aligned prefix of the
  // user region so counts reflect only this allocation's accesses.
  uptr size_rounded_down_to_granularity =
      RoundDownTo(size, SHADOW_GRANULARITY);
  if (size_rounded_down_to_granularity)
    ClearShadow(user_beg, size_rounded_down_to_granularity);

  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_overhead += needed_size - size;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;
  else
    thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

  void *res = reinterpret_cast<void *>(user_beg);
  // Publish the size last (release): readers that observe a nonzero size
  // (GetMemprofChunk's acquire load) see a fully-initialized header.
  atomic_store(&m->user_requested_size, size, memory_order_release);
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
    reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
  }
  RunMallocHooks(res, size);
  return res;
}
4363d4bba30STeresa Johnson
// Central deallocation routine: records a MemInfoBlock for the freed chunk
// (while profiling is active) and returns the block to the allocator.
// `delete_size` / `delete_alignment` / `alloc_type` are accepted for
// interface parity but not used here.
void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                BufferedStackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0)
    return;

  RunFreeHooks(ptr);

  uptr chunk_beg = p - kChunkHeaderSize;
  MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

  // Atomically claim the chunk: the size drops to 0 (marking it freed) and
  // we learn the originally requested size. Acquire pairs with the release
  // store in Allocate.
  u64 user_requested_size =
      atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
  // Record a MIB only while the runtime and this Allocator are fully alive
  // (see constructed/destructing).
  if (memprof_inited && memprof_init_done &&
      atomic_load_relaxed(&constructed) &&
      !atomic_load_relaxed(&destructing)) {
    u64 c = GetShadowCount(p, user_requested_size);
    long curtime = GetTimestamp();

    MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                        m->cpu_id, GetCpuId());
    InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
  }

  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += user_requested_size;

  void *alloc_beg = m->AllocBeg();
  if (alloc_beg != m) {
    // Clear the magic value, as allocator internals may overwrite the
    // contents of deallocated chunk, confusing GetMemprofChunk lookup.
    reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
  }

  MemprofThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, alloc_beg);
  } else {
    // No thread state; use the shared fallback cache under its mutex.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, alloc_beg);
  }
}
4823d4bba30STeresa Johnson
// realloc-style resize: allocates new_size bytes, copies
// min(new_size, old used size), then frees the old block. Both old_ptr and
// new_size must be nonzero (CHECK'd).
void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), nullptr);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    // The old block is freed only after a successful copy; on allocation
    // failure the caller's original block remains valid.
    Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
  }
  return new_ptr;
}
5023d4bba30STeresa Johnson
Calloc__memprof::Allocator5033d4bba30STeresa Johnson void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
5043d4bba30STeresa Johnson if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
5053d4bba30STeresa Johnson if (AllocatorMayReturnNull())
5063d4bba30STeresa Johnson return nullptr;
5073d4bba30STeresa Johnson ReportCallocOverflow(nmemb, size, stack);
5083d4bba30STeresa Johnson }
5093d4bba30STeresa Johnson void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
5103d4bba30STeresa Johnson // If the memory comes from the secondary allocator no need to clear it
5113d4bba30STeresa Johnson // as it comes directly from mmap.
5123d4bba30STeresa Johnson if (ptr && allocator.FromPrimary(ptr))
5133d4bba30STeresa Johnson REAL(memset)(ptr, 0, nmemb * size);
5143d4bba30STeresa Johnson return ptr;
5153d4bba30STeresa Johnson }
5163d4bba30STeresa Johnson
CommitBack__memprof::Allocator5173d4bba30STeresa Johnson void CommitBack(MemprofThreadLocalMallocStorage *ms,
5183d4bba30STeresa Johnson BufferedStackTrace *stack) {
5193d4bba30STeresa Johnson AllocatorCache *ac = GetAllocatorCache(ms);
5203d4bba30STeresa Johnson allocator.SwallowCache(ac);
5213d4bba30STeresa Johnson }
5223d4bba30STeresa Johnson
5233d4bba30STeresa Johnson // -------------------------- Chunk lookup ----------------------
5243d4bba30STeresa Johnson
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
// Decodes the MemprofChunk header inside an allocator block. Returns
// nullptr (leaving user_requested_size possibly unset) when the block holds
// no live chunk.
MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
  if (!alloc_beg)
    return nullptr;
  // A published LargeChunkHeader means the header was relocated (memalign);
  // otherwise, for primary blocks, the header sits at the block start.
  MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
  if (!p) {
    if (!allocator.FromPrimary(alloc_beg))
      return nullptr;
    p = reinterpret_cast<MemprofChunk *>(alloc_beg);
  }
  // The size is reset to 0 on deallocation (and a min of 1 on
  // allocation).
  user_requested_size =
      atomic_load(&p->user_requested_size, memory_order_acquire);
  if (user_requested_size)
    return p;
  return nullptr;
}
5433d4bba30STeresa Johnson
GetMemprofChunkByAddr__memprof::Allocator5443d4bba30STeresa Johnson MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
5453d4bba30STeresa Johnson void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
5463d4bba30STeresa Johnson return GetMemprofChunk(alloc_beg, user_requested_size);
5473d4bba30STeresa Johnson }
5483d4bba30STeresa Johnson
AllocationSize__memprof::Allocator5493d4bba30STeresa Johnson uptr AllocationSize(uptr p) {
5503d4bba30STeresa Johnson u64 user_requested_size;
5513d4bba30STeresa Johnson MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
5523d4bba30STeresa Johnson if (!m)
5533d4bba30STeresa Johnson return 0;
5543d4bba30STeresa Johnson if (m->Beg() != p)
5553d4bba30STeresa Johnson return 0;
5563d4bba30STeresa Johnson return user_requested_size;
5573d4bba30STeresa Johnson }
5583d4bba30STeresa Johnson
Purge__memprof::Allocator5593d4bba30STeresa Johnson void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }
5603d4bba30STeresa Johnson
PrintStats__memprof::Allocator5613d4bba30STeresa Johnson void PrintStats() { allocator.PrintStats(); }
5623d4bba30STeresa Johnson
  // Acquire all allocator locks (e.g. around fork()). Lock order: allocator
  // first, then fallback_mutex; ForceUnlock releases in the reverse order.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }
5673d4bba30STeresa Johnson
  // Release the locks taken by ForceLock, in the reverse acquisition order
  // (fallback_mutex first, then the allocator).
  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
5723d4bba30STeresa Johnson };
5733d4bba30STeresa Johnson
5743d4bba30STeresa Johnson static Allocator instance(LINKER_INITIALIZED);
5753d4bba30STeresa Johnson
get_allocator()5763d4bba30STeresa Johnson static MemprofAllocator &get_allocator() { return instance.allocator; }
5773d4bba30STeresa Johnson
InitializeAllocator()5783d4bba30STeresa Johnson void InitializeAllocator() { instance.InitLinkerInitialized(); }
5793d4bba30STeresa Johnson
// Flush this thread's allocator cache back to the global allocator on
// thread teardown. GET_STACK_TRACE_MALLOC declares the local `stack` used
// on the next line.
void MemprofThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}
5843d4bba30STeresa Johnson
PrintInternalAllocatorStats()5853d4bba30STeresa Johnson void PrintInternalAllocatorStats() { instance.PrintStats(); }
5863d4bba30STeresa Johnson
memprof_free(void * ptr,BufferedStackTrace * stack,AllocType alloc_type)5873d4bba30STeresa Johnson void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
5883d4bba30STeresa Johnson instance.Deallocate(ptr, 0, 0, stack, alloc_type);
5893d4bba30STeresa Johnson }
5903d4bba30STeresa Johnson
memprof_delete(void * ptr,uptr size,uptr alignment,BufferedStackTrace * stack,AllocType alloc_type)5913d4bba30STeresa Johnson void memprof_delete(void *ptr, uptr size, uptr alignment,
5923d4bba30STeresa Johnson BufferedStackTrace *stack, AllocType alloc_type) {
5933d4bba30STeresa Johnson instance.Deallocate(ptr, size, alignment, stack, alloc_type);
5943d4bba30STeresa Johnson }
5953d4bba30STeresa Johnson
memprof_malloc(uptr size,BufferedStackTrace * stack)5963d4bba30STeresa Johnson void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
5973d4bba30STeresa Johnson return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
5983d4bba30STeresa Johnson }
5993d4bba30STeresa Johnson
memprof_calloc(uptr nmemb,uptr size,BufferedStackTrace * stack)6003d4bba30STeresa Johnson void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
6013d4bba30STeresa Johnson return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
6023d4bba30STeresa Johnson }
6033d4bba30STeresa Johnson
// reallocarray(): realloc(p, nmemb * size) with calloc-style overflow
// checking on the multiplication. errno is set before the may-return-null
// path, matching the reallocarray(3) contract.
void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  // Safe to multiply: overflow was ruled out above.
  return memprof_realloc(p, nmemb * size, stack);
}
6143d4bba30STeresa Johnson
memprof_realloc(void * p,uptr size,BufferedStackTrace * stack)6153d4bba30STeresa Johnson void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
6163d4bba30STeresa Johnson if (!p)
6173d4bba30STeresa Johnson return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
6183d4bba30STeresa Johnson if (size == 0) {
6193d4bba30STeresa Johnson if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
6203d4bba30STeresa Johnson instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
6213d4bba30STeresa Johnson return nullptr;
6223d4bba30STeresa Johnson }
6233d4bba30STeresa Johnson // Allocate a size of 1 if we shouldn't free() on Realloc to 0
6243d4bba30STeresa Johnson size = 1;
6253d4bba30STeresa Johnson }
6263d4bba30STeresa Johnson return SetErrnoOnNull(instance.Reallocate(p, size, stack));
6273d4bba30STeresa Johnson }
6283d4bba30STeresa Johnson
memprof_valloc(uptr size,BufferedStackTrace * stack)6293d4bba30STeresa Johnson void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
6303d4bba30STeresa Johnson return SetErrnoOnNull(
6313d4bba30STeresa Johnson instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
6323d4bba30STeresa Johnson }
6333d4bba30STeresa Johnson
// pvalloc(): allocate `size` rounded up to a whole number of pages, with
// page alignment.
void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  // Rounding size up to the page size could overflow; treat as ENOMEM.
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}
6463d4bba30STeresa Johnson
// memalign()-family entry point with caller-specified alignment, which must
// be a power of two; invalid alignment yields EINVAL (or a fatal report).
void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}
6573d4bba30STeresa Johnson
// aligned_alloc() entry point: alignment/size combination is validated per
// the C11 aligned_alloc contract; invalid arguments yield EINVAL (or a
// fatal report when the allocator may not return null).
void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}
6683d4bba30STeresa Johnson
// posix_memalign() entry point: returns 0 on success or an errno value
// directly (it never sets errno, per the POSIX contract). *memptr is only
// written on success.
int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
6843d4bba30STeresa Johnson
memprof_malloc_usable_size(const void * ptr,uptr pc,uptr bp)6853d4bba30STeresa Johnson uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
6863d4bba30STeresa Johnson if (!ptr)
6873d4bba30STeresa Johnson return 0;
6883d4bba30STeresa Johnson uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
6893d4bba30STeresa Johnson return usable_size;
6903d4bba30STeresa Johnson }
6913d4bba30STeresa Johnson
6923d4bba30STeresa Johnson } // namespace __memprof
6933d4bba30STeresa Johnson
6943d4bba30STeresa Johnson // ---------------------- Interface ---------------- {{{1
6953d4bba30STeresa Johnson using namespace __memprof;
6963d4bba30STeresa Johnson
__sanitizer_get_estimated_allocated_size(uptr size)6973d4bba30STeresa Johnson uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
6983d4bba30STeresa Johnson
__sanitizer_get_ownership(const void * p)6993d4bba30STeresa Johnson int __sanitizer_get_ownership(const void *p) {
7003d4bba30STeresa Johnson return memprof_malloc_usable_size(p, 0, 0) != 0;
7013d4bba30STeresa Johnson }
7023d4bba30STeresa Johnson
__sanitizer_get_allocated_size(const void * p)7033d4bba30STeresa Johnson uptr __sanitizer_get_allocated_size(const void *p) {
7043d4bba30STeresa Johnson return memprof_malloc_usable_size(p, 0, 0);
7053d4bba30STeresa Johnson }
706a75b2e87STeresa Johnson
// Public entry point: finalize and write out the collected memory profile.
int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}
713