/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <[email protected]>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

/*
 * LIB/MEMORY/ZALLOC.C - self contained low-overhead memory pool/allocation
 *                       subsystem
 *
 * This subsystem implements memory pools and memory allocation
 * routines.
 *
 * Pools are managed via a linked list of 'free' areas.  Allocating
 * memory creates holes in the freelist, freeing memory fills them.
 * Since the freelist consists only of free memory areas, it is possible
 * to allocate the entire pool without incurring any structural overhead.
 *
 * The system works best when allocating similarly-sized chunks of
 * memory.  Care must be taken to avoid fragmentation when
 * allocating/deallocating dissimilar chunks.
 *
 * When a memory pool is first allocated, the entire pool is marked as
 * allocated.  This is done mainly because we do not want to modify any
 * portion of a pool's data area until we are given permission.  The
 * caller must explicitly deallocate portions of the pool to make them
 * available.
 *
 * z[n]xalloc() works like z[n]alloc() but the allocation is made from
 * within the specified address range.  If the segment could not be
 * allocated, NULL is returned.  WARNING!  The address range will be
 * aligned to an 8 or 16 byte boundary depending on the cpu, so if you
 * give an unaligned address range, unexpected results may occur.
 *
 * If a standard allocation fails, the reclaim function will be called
 * to recover some space.  This usually causes other portions of the
 * same pool to be released.  Memory allocations at this low level
 * should not block, but you can do that too in your reclaim function
 * if you want.  Reclaim does not function when z[n]xalloc() is used,
 * only for z[n]alloc().
 *
 * Allocations and frees of 0 bytes are valid operations.
 */
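
/*
 * Typical lifecycle, as a minimal sketch.  The 'pool' and 'buf' names are
 * hypothetical, and the caller is assumed to supply MALLOCALIGN-aligned
 * backing storage (zfree() panics on misaligned pointers):
 *
 *	static MemPool pool;			(zeroed, so mp_Size == 0)
 *	static char buf[65536];			(backing storage, aligned)
 *
 *	zextendPool(&pool, buf, sizeof(buf));	(covered, but marked used)
 *	zfree(&pool, buf, sizeof(buf));		(make it actually available)
 *
 *	void *p = znalloc(&pool, 128, MALLOCALIGN);
 *	if (p != NULL)
 *		zfree(&pool, p, 128);		(caller must remember the size)
 */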

#include "zalloc_defs.h"

/*
 * Objects in the pool must be aligned to at least the size of struct MemNode.
 * They must also be aligned to MALLOCALIGN, which should normally be larger
 * than the struct, so we assert that at compile time.
 */
typedef char assert_align[(sizeof(struct MemNode) <= MALLOCALIGN) ? 1 : -1];
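
/*
 * The negative-array-size typedef above fails to compile whenever the
 * condition is false.  Under C11 and later the same check could be written
 * as below (a sketch; this file keeps the typedef form, which also works on
 * pre-C11 compilers):
 *
 *	_Static_assert(sizeof(struct MemNode) <= MALLOCALIGN,
 *	    "struct MemNode must fit within MALLOCALIGN");
 */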

#define	MEMNODE_SIZE_MASK	MALLOCALIGN_MASK

/*
 * znalloc() - allocate memory (without zeroing) from pool.  Call reclaim
 *             and retry if appropriate, return NULL if unable to allocate
 *             memory.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes, size_t align)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * align according to pool object size (can be 0).  This is
	 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return ((void *)-1);	/* zero-length allocs get a non-NULL token */

	/*
	 * locate freelist entry big enough to hold the object.  If all objects
	 * are the same size, this is a constant-time function.
	 */

	if (bytes > mp->mp_Size - mp->mp_Used)
		return (NULL);

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		char *ptr = (char *)mn;
		uintptr_t dptr;
		char *aligned;
		size_t extra;

		dptr = (uintptr_t)(ptr + MALLOCALIGN);	/* pointer to data */
		aligned = (char *)(roundup2(dptr, align) - MALLOCALIGN);
		extra = aligned - ptr;

		if (bytes + extra > mn->mr_Bytes)
			continue;

		/*
		 * Cut the extra from the head and create a new memory node
		 * from the remainder.
		 */

		if (extra != 0) {
			MemNode *new;

			new = (MemNode *)aligned;
			new->mr_Next = mn->mr_Next;
			new->mr_Bytes = mn->mr_Bytes - extra;

			/* And update the current memory node */
			mn->mr_Bytes = extra;
			mn->mr_Next = new;
			/* On the next iteration we will find our aligned address */
			continue;
		}

		/*
		 * Cut a chunk of memory out of the beginning of this
		 * block and fixup the link appropriately.
		 */

		if (mn->mr_Bytes == bytes) {
			*pmn = mn->mr_Next;
		} else {
			mn = (MemNode *)((char *)mn + bytes);
			mn->mr_Next = ((MemNode *)ptr)->mr_Next;
			mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
			*pmn = mn;
		}
		mp->mp_Used += bytes;
		return (ptr);
	}

	/*
	 * Memory pool is full, return NULL.
	 */

	return (NULL);
}
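
/*
 * Note that the alignment is applied to the address MALLOCALIGN bytes past
 * the returned pointer (the 'dptr' computed above), which suits a caller
 * that keeps a MALLOCALIGN-sized header in front of its data.  A hedged
 * usage sketch (the 4096-byte alignment and 'pool' name are illustrative):
 *
 *	void *raw = znalloc(&pool, 4096 + MALLOCALIGN, 4096);
 *	if (raw != NULL) {
 *		char *data = (char *)raw + MALLOCALIGN;
 *		(data is now 4096-byte aligned)
 *	}
 */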

/*
 * zfree() - free previously allocated memory
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * align according to pool object size (can be 0).  This is
	 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return;

	/*
	 * panic if illegal pointer
	 */

	if ((char *)ptr < (char *)mp->mp_Base ||
	    (char *)ptr + bytes > (char *)mp->mp_End ||
	    ((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
		panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

	/*
	 * free the segment
	 */
	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		/*
		 * If the freed area lies between the previous node and the
		 * current node:
		 *  - check range
		 *  - check merge with next area
		 *  - check merge with previous area
		 */
		if ((char *)ptr <= (char *)mn) {
			/*
			 * range check
			 */
			if ((char *)ptr + bytes > (char *)mn) {
				panic("zfree(%p,%ju): corrupt memlist1", ptr,
				    (uintmax_t)bytes);
			}

			/*
			 * merge against next area or create independent area
			 */

			if ((char *)ptr + bytes == (char *)mn) {
				((MemNode *)ptr)->mr_Next = mn->mr_Next;
				((MemNode *)ptr)->mr_Bytes =
				    bytes + mn->mr_Bytes;
			} else {
				((MemNode *)ptr)->mr_Next = mn;
				((MemNode *)ptr)->mr_Bytes = bytes;
			}
			*pmn = mn = (MemNode *)ptr;

			/*
			 * merge against previous area (if there is a previous
			 * area).
			 */

			if (pmn != &mp->mp_First) {
				if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes ==
				    (char *)ptr) {
					((MemNode *)pmn)->mr_Next = mn->mr_Next;
					((MemNode *)pmn)->mr_Bytes +=
					    mn->mr_Bytes;
					mn = (MemNode *)pmn;
				}
			}
			return;
		}
		if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
			panic("zfree(%p,%ju): corrupt memlist2", ptr,
			    (uintmax_t)bytes);
		}
	}
	/*
	 * We are beyond the last MemNode, append a new MemNode.  Merge against
	 * the previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr) {
		((MemNode *)ptr)->mr_Next = NULL;
		((MemNode *)ptr)->mr_Bytes = bytes;
		*pmn = (MemNode *)ptr;
		mn = (MemNode *)ptr;
	} else {
		((MemNode *)pmn)->mr_Bytes += bytes;
		mn = (MemNode *)pmn;
	}
}
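
/*
 * zfree() trusts the caller-supplied size: it must match the size passed
 * to znalloc(), or the freelist walk above will panic or corrupt the pool.
 * Passing the original request size back is correct because both functions
 * round it up by MEMNODE_SIZE_MASK identically.  A sketch ('pool' is
 * illustrative):
 *
 *	void *a = znalloc(&pool, 100, MALLOCALIGN);
 *	zfree(&pool, a, 100);		(same size; same rounding applied)
 */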

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *		   Note: the added memory starts out as allocated; you
 *		   must free it to make it available to the memory subsystem.
 *
 *		   Note: mp_Size may not reflect the (mp_End - mp_Base) range
 *		   due to other parts of the system doing their own sbrk()
 *		   calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
	if (mp->mp_Size == 0) {
		mp->mp_Base = base;
		mp->mp_Used = bytes;
		mp->mp_End = (char *)base + bytes;
		mp->mp_Size = bytes;
	} else {
		void *pend = (char *)mp->mp_Base + mp->mp_Size;

		if (base < mp->mp_Base) {
			mp->mp_Size += (char *)mp->mp_Base - (char *)base;
			mp->mp_Used += (char *)mp->mp_Base - (char *)base;
			mp->mp_Base = base;
		}
		base = (char *)base + bytes;
		if (base > pend) {
			mp->mp_Size += (char *)base - (char *)pend;
			mp->mp_Used += (char *)base - (char *)pend;
			mp->mp_End = (char *)base;
		}
	}
}
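
/*
 * Growing an existing pool follows the same extend-then-free pattern as
 * initialization (a sketch; 'pool' and 'more' are hypothetical, and the new
 * region is assumed to sit adjacent to or beyond the current pool range):
 *
 *	zextendPool(&pool, more, sizeof(more));	(accounted for, marked used)
 *	zfree(&pool, more, sizeof(more));	(hand it to the freelist)
 */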

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
	int abytes = 0;
	int hbytes = 0;
	int fcount = 0;
	MemNode *mn;

	printf("%d bytes reserved", (int)mp->mp_Size);

	mn = mp->mp_First;

	if ((void *)mn != (void *)mp->mp_Base) {
		abytes += (char *)mn - (char *)mp->mp_Base;
	}

	while (mn != NULL) {
		if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
			hbytes += mn->mr_Bytes;
			++fcount;
		}
		if (mn->mr_Next != NULL) {
			abytes += (char *)mn->mr_Next -
			    ((char *)mn + mn->mr_Bytes);
		}
		mn = mn->mr_Next;
	}
	printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	    abytes, fcount, hbytes);
}

#endif