/*
Copyright (c) 2001 Wolfram Gloger
Copyright (c) 2006 Cavium networks

Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that (i) the above copyright notices and this permission
notice appear in all copies of the software and related documentation,
and (ii) the name of Wolfram Gloger may not be used in any advertising
or publicity relating to the software.

THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.

IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/

/* $Id: arena.c 30481 2007-12-05 21:46:59Z rfranz $ */

/* Compile-time constants. */

#define HEAP_MIN_SIZE (4096) /* Must leave room for struct malloc_state, arena ptrs, etc., which total about 2400 bytes */

#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */

/***************************************************************************/

// made static to avoid conflicts with newlib
static mstate _int_new_arena __MALLOC_P ((size_t __ini_size));

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  Not used unless compiling with USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t pad;               /* Make sure the following data is properly aligned. */
} heap_info;
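
/* Layout sketch (illustrative, derived from cvmx_new_arena below): each
   user-supplied block starts with a heap_info header, immediately followed
   by the malloc_state for that arena, then the aligned top chunk:

       addr -> +----------------------+
               | heap_info            |  h = (heap_info *)addr
               +----------------------+
               | struct malloc_state  |  a = h->ar_ptr = (mstate)(h + 1)
               +----------------------+
               | top chunk ...        |  aligned to MALLOC_ALIGNMENT
               +----------------------+  ends at (char *)addr + h->size
*/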

/* Thread specific data */

static tsd_key_t arena_key;            // one per PP (thread)
static CVMX_SHARED mutex_t list_lock;  // shared...

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int CVMX_SHARED cvmx__malloc_initialized = -1;

/**************************************************************************/

#if USE_ARENAS

/* find the heap and corresponding arena for a given ptr */

#define arena_for_chunk(ptr) ((ptr)->arena_ptr)
#define set_arena_for_chunk(ptr, arena) (ptr)->arena_ptr = (arena)


#endif /* USE_ARENAS */
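
/* Usage sketch (illustrative, not compiled): the allocation paths are
   expected to record the owning arena in each chunk so that a later free can
   find it again without a heap lookup, e.g.

       set_arena_for_chunk(mem2chunk(mem), ar_ptr);     // at allocation time
       ...
       mstate ar_ptr = arena_for_chunk(mem2chunk(mem)); // at free time,
       (void)mutex_lock(&ar_ptr->mutex);                // as in free_atfork()
*/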

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support. */

static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
                                                       __const __malloc_ptr_t));
static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                           __const __malloc_ptr_t));
static Void_t* save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use. */

#define ATFORK_ARENA_PTR ((Void_t*)-1)
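
/* Protocol sketch (the fork handlers themselves are not in this file, so
   this mirrors the classic ptmalloc atfork scheme): the fork-prepare handler
   is assumed to store the magic value in the calling thread's slot,

       tsd_setspecific(arena_key, ATFORK_ARENA_PTR);

   so that free_atfork() below can tell the arena mutexes are already held by
   this thread and must not be re-locked. */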

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  return(NULL);
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);                   /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}


#ifdef __linux__
#error __linux__ defined!
#endif

#endif /* !defined NO_THREADS */


/* Initialization routine. */
#ifdef _LIBC
#error _LIBC is defined, and should not be
#endif /* _LIBC */

static CVMX_SHARED cvmx_spinlock_t malloc_init_spin_lock;



/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */
/* Create a new arena in a user-supplied block of memory. */


static mstate cvmx_new_arena(void *addr, size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;
  int page_mask = malloc_getpagesize - 1;

  debug_printf("cvmx_new_arena called, addr: %p, size %lu\n", addr, (unsigned long)size);
  debug_printf("heapinfo size: %lu, mstate size: %lu\n", (unsigned long)sizeof(heap_info), (unsigned long)sizeof(struct malloc_state));

  if (!addr || (size < HEAP_MIN_SIZE))
  {
    return(NULL);
  }
  /* We must zero out the arena as the malloc code assumes this. */
  memset(addr, 0, size);

  h = (heap_info *)addr;
  h->size = size;

  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
  a->next = a;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}
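
/* Worked example (approximate figures, from the HEAP_MIN_SIZE note above):
   for a 4096-byte block, the heap_info header plus struct malloc_state and
   alignment padding consume on the order of 2400 bytes, leaving roughly
   1.6 KB for the initial top chunk.  Blocks smaller than HEAP_MIN_SIZE are
   therefore rejected up front. */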


int cvmx_add_arena(cvmx_arena_list_t *arena_list, void *ptr, size_t size)
{
  mstate a;

  /* Enforce required alignment, and adjust size */
  int misaligned = ((size_t)ptr) & (MALLOC_ALIGNMENT - 1);
  if (misaligned)
  {
    ptr = (char*)ptr + MALLOC_ALIGNMENT - misaligned;
    size -= MALLOC_ALIGNMENT - misaligned;
  }

  debug_printf("Adding arena at addr: %p, size %lu\n", ptr, (unsigned long)size);

  a = cvmx_new_arena(ptr, size);  /* checks ptr and size */
  if (!a)
  {
    return(-1);
  }

  debug_printf("cvmx_add_arena - arena_list: %p, *arena_list: %p\n", arena_list, *arena_list);
  debug_printf("cvmx_add_arena - list: %p, new: %p\n", *arena_list, a);
  mutex_init(&a->mutex);
  mutex_lock(&a->mutex);


  if (*arena_list)
  {
    mstate ar_ptr = *arena_list;
    (void)mutex_lock(&ar_ptr->mutex);
    a->next = ar_ptr->next;   // lock held on a and ar_ptr
    ar_ptr->next = a;
    (void)mutex_unlock(&ar_ptr->mutex);
  }
  else
  {
    *arena_list = a;
    // a->next = a;
  }

  debug_printf("cvmx_add_arena - list: %p, list->next: %p\n", *arena_list, ((mstate)*arena_list)->next);

  // unlock, since it is not going to be used immediately
  (void)mutex_unlock(&a->mutex);

  return(0);
}
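
/* Usage sketch (illustrative only; names other than cvmx_add_arena are
   hypothetical placeholders): a caller hands one or more memory blocks to an
   arena list before using that list for allocations, e.g.

       static CVMX_SHARED cvmx_arena_list_t my_arena_list = NULL;
       void *block = get_block_somehow();   // hypothetical helper
       size_t block_size = 1 << 20;         // must be >= HEAP_MIN_SIZE
       if (cvmx_add_arena(&my_arena_list, block, block_size) != 0)
           ;                                // handle failure

   Blocks need not be MALLOC_ALIGNMENT-aligned; the function rounds the start
   address up and shrinks the size accordingly before building the arena. */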



#endif /* USE_ARENAS */
