//===------------------------ fallback_malloc.cpp -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Define _LIBCPP_BUILDING_LIBRARY to ensure _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION
// is only defined when libc aligned allocation is not available.
#define _LIBCPP_BUILDING_LIBRARY
#include "fallback_malloc.h"

#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // for std::cout in the debug/instrumentation paths below
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool; supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.

namespace {

// When POSIX threads are not available, make the mutex operations a no-op.
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

// How big a chunk we allocate, in heap_node units, including the header.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
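
// Worked example (illustrative; assumes the usual 2-byte unsigned short, so
// sizeof(heap_node) == 4): a request for 10 bytes rounds up to
// alloc_size(10) == (10 + 3) / 4 + 1 == 4 units, i.e. one 4-byte header plus
// 12 bytes of payload. Even a 1-byte request costs 2 units (8 bytes), so the
// 512-byte pool can hold at most 64 of the smallest allocations.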

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { // chunk is larger; shorten it and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a chunk big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "Freeing item at " << offset_from_node(cp) << " of size "
            << cp->len << std::endl;
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  p, cp, after(p), after(cp) " << offset_from_node(p) << ' '
              << offset_from_node(cp) << ' ' << offset_from_node(after(p))
              << ' ' << offset_from_node(after(cp)) << std::endl;
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending onto chunk at " << offset_from_node(p)
                << std::endl;
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending free chunk at " << offset_from_node(p)
                << std::endl;
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      cp->next_node = p->next_node; // cp takes p's place in the free list
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "  Making new free list entry " << offset_from_node(cp)
            << std::endl;
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}
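
// Coalescing sketch (illustrative; unit counts assume the 4-byte heap_node
// above): on a fresh 512-byte heap, fallback_malloc(4) needs 2 units and
// carves them off the tail of the single 128-unit free node, leaving that
// node with len == 126 and the allocated chunk at offset 126. Freeing the
// chunk then hits the after(p) == cp case and simply grows the free node
// back to 128 units, returning the pool to its initial state.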
"" : " ") << "Offset: " << offset_from_node(p) 194 << "\tsize: " << p->len << " Next: " << p->next_node << std::endl; 195 total_free += p->len; 196 } 197 std::cout << "Total Free space: " << total_free << std::endl; 198 return total_free; 199 } 200 #endif 201 } // end unnamed namespace 202 203 namespace __cxxabiv1 { 204 205 struct __attribute__((aligned)) __aligned_type {}; 206 207 void* __aligned_malloc_with_fallback(size_t size) { 208 #if defined(_WIN32) 209 if (void* dest = _aligned_malloc(size, alignof(__aligned_type))) 210 return dest; 211 #elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION) 212 if (void* dest = std::malloc(size)) 213 return dest; 214 #else 215 if (size == 0) 216 size = 1; 217 void* dest; 218 if (::posix_memalign(&dest, __alignof(__aligned_type), size) == 0) 219 return dest; 220 #endif 221 return fallback_malloc(size); 222 } 223 224 void* __calloc_with_fallback(size_t count, size_t size) { 225 void* ptr = std::calloc(count, size); 226 if (NULL != ptr) 227 return ptr; 228 // if calloc fails, fall back to emergency stash 229 ptr = fallback_malloc(size * count); 230 if (NULL != ptr) 231 std::memset(ptr, 0, size * count); 232 return ptr; 233 } 234 235 void __aligned_free_with_fallback(void* ptr) { 236 if (is_fallback_ptr(ptr)) 237 fallback_free(ptr); 238 else { 239 #if defined(_WIN32) 240 ::_aligned_free(ptr); 241 #else 242 std::free(ptr); 243 #endif 244 } 245 } 246 247 void __free_with_fallback(void* ptr) { 248 if (is_fallback_ptr(ptr)) 249 fallback_free(ptr); 250 else 251 std::free(ptr); 252 } 253 254 } // namespace __cxxabiv1 255