//===------------------------ fallback_malloc.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include "config.h"
#include "threading_support.h"

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // for std::cout in the debug and instrumentation paths
#endif

//  A small, simple heap manager based (loosely) on
//  the startup heap manager from FreeBSD, optimized for space.
//
//  Manages a fixed-size memory pool; supports malloc and free only.
//  No support for realloc.
//
//  Allocates chunks in multiples of four bytes, with a four-byte header
//  for each chunk. The overhead of each chunk is kept low by keeping pointers
//  as two-byte offsets within the heap, rather than (4- or 8-byte) pointers.

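//  Illustrative numbers (assuming 2-byte unsigned shorts, so a 4-byte
//  heap_node): the 512-byte pool starts life as a single free chunk of
//  512 / 4 == 128 units, and all bookkeeping below works in whole
//  heap_node units rather than bytes.
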
namespace {

// When POSIX threads are not available, make the mutex operations no-ops
#ifndef _LIBCXXABI_HAS_NO_THREADS
static __libcxxabi_mutex_t heap_mutex = _LIBCXXABI_MUTEX_INITIALIZER;
#else
static void * heap_mutex = 0;
#endif

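// RAII helper: holds heap_mutex for the lifetime of the object, so every
// early return in the allocation routines below unlocks correctly.
// Copying is disabled; without thread support, lock and unlock are no-ops.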
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
    mutexor ( __libcxxabi_mutex_t *m ) : mtx_(m) { __libcxxabi_mutex_lock ( mtx_ ); }
    ~mutexor () { __libcxxabi_mutex_unlock ( mtx_ ); }
#else
    mutexor ( void * ) {}
    ~mutexor () {}
#endif
private:
    mutexor ( const mutexor &rhs );
    mutexor & operator = ( const mutexor &rhs );
#ifndef _LIBCXXABI_HAS_NO_THREADS
    __libcxxabi_mutex_t *mtx_;
#endif
};

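// The emergency pool: 512 bytes of static storage. __attribute__((aligned))
// with no argument requests the target's largest useful alignment, so
// (heap_node *) heap and the chunks carved from it are suitably aligned.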
static const size_t HEAP_SIZE = 512;
char heap [ HEAP_SIZE ] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
    heap_offset next_node;  // offset into heap
    heap_size   len;        // size in units of "sizeof(heap_node)"
};

static const heap_node *list_end =
    (heap_node *) ( &heap [ HEAP_SIZE ] ); // one past the end of the heap
static heap_node *freelist = NULL;

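// Free-list links are stored as 16-bit offsets in units of sizeof(heap_node)
// rather than as full pointers. For example (assuming a 4-byte heap_node),
// offset 3 names the node that starts at byte 12 of the heap, and the
// list_end sentinel is offset 128 for the 512-byte pool.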
heap_node *node_from_offset ( const heap_offset offset )
    { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }

heap_offset offset_from_node ( const heap_node *ptr )
    { return static_cast<heap_offset>(
        static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap)
            / sizeof (heap_node)); }

void init_heap () {
    freelist = (heap_node *) heap;
    freelist->next_node = offset_from_node ( list_end );
    freelist->len = HEAP_SIZE / sizeof (heap_node);
}

//  Return the number of heap_node units needed to satisfy a request of
//  "len" bytes, including one extra unit for the chunk header
size_t alloc_size (size_t len)
    { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }
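//  For example (assuming a 4-byte heap_node): alloc_size(10) ==
//  (10 + 3) / 4 + 1 == 4 units, i.e. 16 bytes -- a 4-byte header plus
//  12 bytes of payload for the 10-byte request.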

bool is_fallback_ptr ( void *ptr )
    { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }

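//  First-fit allocator: walk the free list and use the first chunk that
//  is at least "nelems" units. A larger chunk is shortened in place and
//  its tail is returned, so the chunk's free-list links need not change;
//  an exact-size chunk is unlinked whole. Returns NULL when nothing fits.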
void *fallback_malloc(size_t len) {
    heap_node *p, *prev;
    const size_t nelems = alloc_size ( len );
    mutexor mtx ( &heap_mutex );

    if ( NULL == freelist )
        init_heap ();

//  Walk the free list, looking for a "big enough" chunk
    for (p = freelist, prev = 0;
            p && p != list_end;
            prev = p, p = node_from_offset ( p->next_node )) {

        if (p->len > nelems) {  //  chunk is larger, shorten, and return the tail
            heap_node *q;

            p->len = static_cast<heap_size>(p->len - nelems);
            q = p + p->len;
            q->next_node = 0;
            q->len = static_cast<heap_size>(nelems);
            return (void *) (q + 1);
        }

        if (p->len == nelems) { // exact size match
            if (prev == 0)
                freelist = node_from_offset(p->next_node);
            else
                prev->next_node = p->next_node;
            p->next_node = 0;
            return (void *) (p + 1);
        }
    }
    return NULL;    // couldn't find a spot big enough
}

//  Return the start of the next block
heap_node *after ( struct heap_node *p ) { return p + p->len; }

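//  Free a chunk by coalescing: walk the free list and merge the chunk
//  with the first free neighbor found (the free chunk immediately before
//  it, or the one immediately after). If neither neighbor is free, push
//  the chunk onto the front of the free list instead.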
void fallback_free (void *ptr) {
    struct heap_node *cp = ((struct heap_node *) ptr) - 1;      // retrieve the chunk
    struct heap_node *p, *prev;

    mutexor mtx ( &heap_mutex );

#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "Freeing item at " << offset_from_node ( cp ) << " of size " << cp->len << std::endl;
#endif

    for (p = freelist, prev = 0;
            p && p != list_end;
            prev = p, p = node_from_offset (p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
        std::cout << "  p, cp, after (p), after(cp) "
            << offset_from_node ( p ) << ' '
            << offset_from_node ( cp ) << ' '
            << offset_from_node ( after ( p )) << ' '
            << offset_from_node ( after ( cp )) << std::endl;
#endif
        if ( after ( p ) == cp ) {
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending onto chunk at " << offset_from_node ( p ) << std::endl;
#endif
            p->len = static_cast<heap_size>(p->len + cp->len);  // make the free heap_node larger
            return;
        }
        else if ( after ( cp ) == p ) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending free chunk at " << offset_from_node ( p ) << std::endl;
#endif
            cp->len = static_cast<heap_size>(cp->len + p->len);
            cp->next_node = p->next_node;   // cp absorbs p, taking over its link
            if ( prev == 0 )
                freelist = cp;
            else
                prev->next_node = offset_from_node(cp);
            return;
        }
    }
//  Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  Making new free list entry " << offset_from_node ( cp ) << std::endl;
#endif
    cp->next_node = offset_from_node ( freelist );
    freelist = cp;
}

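//  Debug aid: dump every free-list entry and return the total free space
//  in heap_node units (128 units means the entire 512-byte pool is free,
//  assuming a 4-byte heap_node).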
#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list () {
    struct heap_node *p, *prev;
    size_t total_free = 0;
    if ( NULL == freelist )
        init_heap ();

    for (p = freelist, prev = 0;
            p && p != list_end;
            prev = p, p = node_from_offset (p->next_node)) {
        std::cout << ( prev == 0 ? "" : "  " ) << "Offset: " << offset_from_node ( p )
            << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
        total_free += p->len;
    }
    std::cout << "Total Free space: " << total_free << std::endl;
    return total_free;
}
#endif
}  // end unnamed namespace

namespace __cxxabiv1 {

#pragma GCC visibility push(hidden)

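// Public entry points: try the real heap first and fall back to the
// emergency pool only when it fails, e.g. so that an exception object can
// still be allocated and thrown under out-of-memory conditions.
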
void * __malloc_with_fallback(size_t size) {
    void *ptr = std::malloc(size);
    if (NULL == ptr) // if malloc fails, fall back to emergency stash
        ptr = fallback_malloc(size);
    return ptr;
}

void * __calloc_with_fallback(size_t count, size_t size) {
    void *ptr = std::calloc(count, size);
    if (NULL != ptr)
        return ptr;
    // if calloc fails, fall back to emergency stash
    if (size != 0 && count > (size_t(-1) / size)) // size * count would overflow
        return NULL;
    ptr = fallback_malloc(size * count);
    if (NULL != ptr)
        std::memset(ptr, 0, size * count);
    return ptr;
}

void __free_with_fallback(void *ptr) {
    if (is_fallback_ptr(ptr))
        fallback_free(ptr);
    else
        std::free(ptr);
}

#pragma GCC visibility pop

} // namespace __cxxabiv1