//===------------------------ fallback_malloc.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include "config.h"
#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // for the std::cout logging in the debug/instrumentation code
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.

namespace {

// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

// How big a chunk we allocate
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
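
// Worked example (illustrative only, assuming the usual 2-byte unsigned
// short, so sizeof(heap_node) == 4): the heap is 512 / 4 == 128 units long,
// and alloc_size(10) == (10 + 3) / 4 + 1 == 4 units, i.e. 16 bytes: a 4-byte
// header plus 12 bytes of payload (the 10 requested bytes rounded up to a
// whole number of units).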

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "Freeing item at " << offset_from_node(cp) << " of size "
            << cp->len << std::endl;
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  p, cp, after(p), after(cp) " << offset_from_node(p) << ' '
              << offset_from_node(cp) << ' ' << offset_from_node(after(p))
              << ' ' << offset_from_node(after(cp)) << std::endl;
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending onto chunk at " << offset_from_node(p)
                << std::endl;
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending free chunk at " << offset_from_node(p)
                << std::endl;
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      // cp takes p's place in the free list; it must be linked to p's
      // successor in both branches, not only when p was the head, or the
      // tail of the list is lost (allocated chunks carry next_node == 0).
      cp->next_node = p->next_node;
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "  Making new free list entry " << offset_from_node(cp)
            << std::endl;
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}
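
// Trace of the intended behavior (illustrative only): starting from a fresh
// 128-unit heap, fallback_malloc(10) needs nelems == 4 units, so it splits
// the single free chunk, shrinking it to 124 units and returning the trailing
// 4 units (header at unit offset 124, payload at offset 125). Freeing that
// pointer hits the after(p) == cp case on the first pass and coalesces the
// chunk back, restoring the single 128-unit free chunk.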
"" : " ") << "Offset: " << offset_from_node(p) 193 << "\tsize: " << p->len << " Next: " << p->next_node << std::endl; 194 total_free += p->len; 195 } 196 std::cout << "Total Free space: " << total_free << std::endl; 197 return total_free; 198 } 199 #endif 200 } // end unnamed namespace 201 202 namespace __cxxabiv1 { 203 204 struct __attribute__((aligned)) __aligned_type {}; 205 206 void* __aligned_malloc_with_fallback(size_t size) { 207 #if defined(_WIN32) 208 if (void* dest = _aligned_malloc(size, alignof(__aligned_type))) 209 return dest; 210 #elif defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION) 211 if (void* dest = std::malloc(size)) 212 return dest; 213 #else 214 if (size == 0) 215 size = 1; 216 void* dest; 217 if (::posix_memalign(&dest, alignof(__aligned_type), size) == 0) 218 return dest; 219 #endif 220 return fallback_malloc(size); 221 } 222 223 void* __calloc_with_fallback(size_t count, size_t size) { 224 void* ptr = std::calloc(count, size); 225 if (NULL != ptr) 226 return ptr; 227 // if calloc fails, fall back to emergency stash 228 ptr = fallback_malloc(size * count); 229 if (NULL != ptr) 230 std::memset(ptr, 0, size * count); 231 return ptr; 232 } 233 234 void __aligned_free_with_fallback(void* ptr) { 235 if (is_fallback_ptr(ptr)) 236 fallback_free(ptr); 237 else { 238 #if defined(_WIN32) 239 ::_aligned_free(ptr); 240 #else 241 std::free(ptr); 242 #endif 243 } 244 } 245 246 void __free_with_fallback(void* ptr) { 247 if (is_fallback_ptr(ptr)) 248 fallback_free(ptr); 249 else 250 std::free(ptr); 251 } 252 253 } // namespace __cxxabiv1 254