//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include <__threading_support>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#include <new>      // for std::__libcpp_aligned_{alloc,free}

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
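//
// For illustration only (assuming the usual 2-byte 'unsigned short', so
// sizeof(heap_node) == 4): a request for 10 bytes is rounded up to
// 3 node-sized units plus 1 unit of header, i.e. 16 bytes of the pool.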

namespace {

// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));
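// Note: the pool lives in static storage, so it remains usable even after
// ::malloc itself has started failing.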

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};
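// Both fields are 16-bit and measured in units of sizeof(heap_node), so the
// per-chunk header stays at 4 bytes (with the usual 2-byte 'unsigned short')
// while still covering the whole 512-byte pool (128 node-sized units).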

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

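// Lazily called on the first fallback allocation (and, when instrumented,
// from print_free_list): the whole pool becomes one free node whose
// next_node offset points one past the end of the heap (list_end).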
void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

// How big a chunk we allocate
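// (requested bytes rounded up to whole heap_nodes, plus one node for the
// header; e.g. with a 4-byte heap_node, alloc_size(10) is 3 + 1 = 4 units).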
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

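// First-fit search over the free list.  Oversized chunks are carved from the
// tail, so the free node itself stays in place and only its 'len' shrinks;
// an exact match is unlinked from the list instead.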
void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

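// Return a chunk to the pool, merging it with an adjacent free node if one
// is found while walking the free list; otherwise push it onto the front of
// the list.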
void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf(" p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf(" Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf(" Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      // cp absorbs p and takes over p's position in the free list; the
      // next_node link must be copied in both branches, or the rest of the
      // list is lost when p was not the head.
      cp->next_node = p->next_node;
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf(" Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

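// Entry points used by the rest of libc++abi (for example, exception-object
// allocation in __cxa_allocate_exception): try the real heap first, and fall
// back to the emergency pool above only when that fails.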
namespace __cxxabiv1 {

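// An empty type whose alignment is the "maximum useful" alignment for the
// target (this is what __attribute__((aligned)) with no argument requests;
// typically 16 bytes on common 64-bit targets).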
struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
    ::free(ptr);
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1