//===------------------------ fallback_malloc.cpp -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Define _LIBCPP_BUILDING_LIBRARY to ensure _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION
// is only defined when libc aligned allocation is not available.
#define _LIBCPP_BUILDING_LIBRARY
#include "fallback_malloc.h"

#include <__threading_support>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <cstdio> // for std::printf in the debug/instrumentation code paths
#endif

//  A small, simple heap manager based (loosely) on
//  the startup heap manager from FreeBSD, optimized for space.
//
//  Manages a fixed-size memory pool, supports malloc and free only.
//  No support for realloc.
//
//  Allocates chunks in multiples of four bytes, with a four-byte header
//  for each chunk. The overhead of each chunk is kept low by storing the
//  links as two-byte offsets within the heap, rather than as 4- or 8-byte
//  pointers.
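//
//  Example (assuming the typical case where sizeof(heap_node) == 4): the
//  512-byte pool holds 128 four-byte units; a 10-byte request consumes
//  4 units -- one unit of header plus three units (12 bytes) of payload --
//  and the caller receives a pointer to the byte just past the header.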

namespace {

// When POSIX threads are not available, make the mutex operations no-ops
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

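// RAII helper that holds the emergency-heap mutex for the duration of a
// scope; when threads are unavailable it compiles down to an empty object.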
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

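//  Free-list links are stored as indices into the heap, measured in units of
//  sizeof(heap_node), so they always fit in an unsigned short even though a
//  real pointer would need 4 or 8 bytes.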
heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

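//  Lazily called on first use: the whole pool starts out as one free chunk
//  whose next_node is the list_end sentinel (one past the end of the heap).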
void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

//  How big a chunk we allocate
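//  (the request rounded up to whole heap_node units, plus one unit for the
//  chunk header; e.g. with four-byte units a 10-byte request needs 4 units)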
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

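//  First-fit allocation: walk the free list and carve the request out of the
//  first chunk that is big enough.  Oversized chunks are split from the tail,
//  so the remaining head keeps its place in the list and no links change.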
void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  //  Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { //  chunk is larger, shorten, and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}

//  Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

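//  Return a chunk to the pool, merging it with a physically adjacent free
//  chunk when the scan finds one (at most one neighbour per call); otherwise
//  the chunk is simply pushed onto the front of the free list.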
void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf("  p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
      offset_from_node(p), offset_from_node(cp),
      offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      cp->next_node = p->next_node; // cp absorbs p, so it takes over p's link
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
//  Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("  Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
      (prev == 0 ? "" : "  "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

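// The entry points below are what the rest of the ABI runtime calls (for
// example when allocating exception objects): the allocation wrappers try
// the normal heap first and fall back to the emergency pool above when that
// fails, and the matching free wrappers route each pointer back to whichever
// heap it came from.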
namespace __cxxabiv1 {

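// A dummy type whose alignment is the largest alignment the target ever
// requires (that is how GCC and Clang interpret the bare `aligned`
// attribute); __aligned_malloc_with_fallback asks the underlying allocator
// for this alignment below.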
struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = _aligned_malloc(size, alignof(__aligned_type)))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  void* dest;
  if (::posix_memalign(&dest, __alignof(__aligned_type), size) == 0)
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_WIN32)
    ::_aligned_free(ptr);
#else
    ::free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1