/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <[email protected]>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

/*
 * LIB/MEMORY/ZALLOC.C - self-contained, low-overhead memory pool/allocation
 * subsystem
 *
 * This subsystem implements memory pools and memory allocation
 * routines.
 *
 * Pools are managed via a linked list of 'free' areas.  Allocating
 * memory creates holes in the freelist; freeing memory fills them.
 * Since the freelist consists only of free memory areas, it is possible
 * to allocate the entire pool without incurring any structural overhead.
 *
 * The system works best when allocating similarly-sized chunks of
 * memory.  Care must be taken to avoid fragmentation when
 * allocating/deallocating dissimilar chunks.
 *
 * When a memory pool is first allocated, the entire pool is marked as
 * allocated.  This is done mainly because we do not want to modify any
 * portion of a pool's data area until we are given permission.  The
 * caller must explicitly deallocate portions of the pool to make them
 * available.
 *
 * z[n]xalloc() works like z[n]alloc(), but the allocation is made from
 * within the specified address range.  If the segment cannot be
 * allocated, NULL is returned.  WARNING!  The address range will be
 * aligned to an 8 or 16 byte boundary depending on the CPU, so if you
 * give an unaligned address range, unexpected results may occur.
 *
 * If a standard allocation fails, the reclaim function will be called
 * to recover some space.  This usually causes other portions of the
 * same pool to be released.  Memory allocations at this low level
 * should not block, but a reclaim function may do so if the caller can
 * tolerate it.  The reclaim function is invoked only for z[n]alloc(),
 * never for z[n]xalloc().
 *
 * Allocations and frees of 0 bytes are valid operations.
 */
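
/*
 * A minimal usage sketch (a hypothetical caller; 'heap' and 'pool' are
 * illustrative names, everything else is defined by this module).  A
 * pool starts out fully allocated, so a region must be zfree()d before
 * znalloc() can hand anything out:
 *
 *	static char heap[65536];	(aligned to MALLOCALIGN by caller)
 *	static MemPool pool;		(static storage starts out zeroed)
 *
 *	zextendPool(&pool, heap, sizeof(heap));
 *	zfree(&pool, heap, sizeof(heap));	(make region available)
 *	ptr = znalloc(&pool, 128, MALLOCALIGN);
 *	...
 *	zfree(&pool, ptr, 128);
 */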

#include "zalloc_defs.h"

/*
 * Objects in the pool must be aligned to at least the size of struct MemNode.
 * They must also be aligned to MALLOCALIGN, which should normally be larger
 * than the struct, so assert that to be so at compile time.
 */
typedef char assert_align[(sizeof(struct MemNode) <= MALLOCALIGN) ? 1 : -1];

#define	MEMNODE_SIZE_MASK	MALLOCALIGN_MASK

/*
 * znalloc() - allocate memory (without zeroing) from pool.  Call reclaim
 *	       and retry if appropriate, return NULL if unable to allocate
 *	       memory.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes, size_t align)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * Round the request up to the pool's allocation granularity.
	 * This is inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 * The request may be 0 bytes.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

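	/*
	 * e.g. with MALLOCALIGN 16 (MEMNODE_SIZE_MASK 15), a 10-byte
	 * request rounds up to 16 and a 17-byte request rounds up to 32.
	 * A zero-size request is valid and returns a distinct non-NULL
	 * token rather than a real allocation.
	 */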
	if (bytes == 0)
		return ((void *)-1);

	/*
	 * Locate a freelist entry big enough to hold the object.  If all
	 * objects are the same size, this is a constant-time function.
	 */

	if (bytes > mp->mp_Size - mp->mp_Used)
		return (NULL);

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		char *ptr = (char *)mn;
		uintptr_t dptr;
		char *aligned;
		size_t extra;

		dptr = (uintptr_t)(ptr + MALLOCALIGN);	/* pointer to data */
		aligned = (char *)(roundup2(dptr, align) - MALLOCALIGN);
		extra = aligned - ptr;

		if (bytes + extra > mn->mr_Bytes)
			continue;

		/*
		 * Cut extra from head and create new memory node from
		 * remainder.
		 */

		if (extra != 0) {
			MemNode *new;

			new = (MemNode *)aligned;
			new->mr_Next = mn->mr_Next;
			new->mr_Bytes = mn->mr_Bytes - extra;

			/* And update current memory node */
			mn->mr_Bytes = extra;
			mn->mr_Next = new;
			/* In next iteration, we will get our aligned address */
			continue;
		}

		/*
		 * Cut a chunk of memory out of the beginning of this
		 * block and fixup the link appropriately.
		 */

		if (mn->mr_Bytes == bytes) {
			*pmn = mn->mr_Next;
		} else {
			mn = (MemNode *)((char *)mn + bytes);
			mn->mr_Next = ((MemNode *)ptr)->mr_Next;
			mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
			*pmn = mn;
		}
		mp->mp_Used += bytes;
		return (ptr);
	}

	/*
	 * Memory pool is full, return NULL.
	 */

	return (NULL);
}

/*
 * zfree() - free previously allocated memory
 */
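
/*
 * Allocations carry no size header, so the caller must pass the same
 * size it passed to znalloc(); zfree() applies the same rounding before
 * searching the freelist.
 */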

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * Round the request up to the pool's allocation granularity,
	 * exactly as znalloc() did.  A free of 0 bytes is legal.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return;

	/*
	 * panic if illegal pointer
	 */

	if ((char *)ptr < (char *)mp->mp_Base ||
	    (char *)ptr + bytes > (char *)mp->mp_End ||
	    ((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
		panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

	/*
	 * free the segment
	 */
	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		/*
		 * If area between last node and current node
		 *  - check range
		 *  - check merge with next area
		 *  - check merge with previous area
		 */
		if ((char *)ptr <= (char *)mn) {
			/*
			 * range check
			 */
			if ((char *)ptr + bytes > (char *)mn) {
				panic("zfree(%p,%ju): corrupt memlist1", ptr,
				    (uintmax_t)bytes);
			}

			/*
			 * merge against next area or create independent area
			 */

			if ((char *)ptr + bytes == (char *)mn) {
				((MemNode *)ptr)->mr_Next = mn->mr_Next;
				((MemNode *)ptr)->mr_Bytes =
				    bytes + mn->mr_Bytes;
			} else {
				((MemNode *)ptr)->mr_Next = mn;
				((MemNode *)ptr)->mr_Bytes = bytes;
			}
			*pmn = mn = (MemNode *)ptr;

			/*
			 * merge against previous area (if there is a previous
			 * area).
			 */

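			/*
			 * Note: pmn points at the previous node's mr_Next,
			 * which is the first member of struct MemNode, so
			 * casting pmn to a MemNode pointer (here and in the
			 * append path below) recovers the previous node.
			 */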
			if (pmn != &mp->mp_First) {
				if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes ==
				    (char *)ptr) {
					((MemNode *)pmn)->mr_Next = mn->mr_Next;
					((MemNode *)pmn)->mr_Bytes +=
					    mn->mr_Bytes;
					mn = (MemNode *)pmn;
				}
			}
			return;
		}
		if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
			panic("zfree(%p,%ju): corrupt memlist2", ptr,
			    (uintmax_t)bytes);
		}
	}
	/*
	 * We are beyond the last MemNode; append a new MemNode and merge
	 * against the previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr) {
		((MemNode *)ptr)->mr_Next = NULL;
		((MemNode *)ptr)->mr_Bytes = bytes;
		*pmn = (MemNode *)ptr;
		mn = (MemNode *)ptr;
	} else {
		((MemNode *)pmn)->mr_Bytes += bytes;
		mn = (MemNode *)pmn;
	}
}

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 * Note: the added memory starts out as allocated; you must free it to
 * make it available to the memory subsystem.
 *
 * Note: mp_Size may not reflect the (mp_End - mp_Base) range due to
 * other parts of the system doing their own sbrk() calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
	if (mp->mp_Size == 0) {
		mp->mp_Base = base;
		mp->mp_Used = bytes;
		mp->mp_End = (char *)base + bytes;
		mp->mp_Size = bytes;
	} else {
		void *pend = (char *)mp->mp_Base + mp->mp_Size;

		if (base < mp->mp_Base) {
			mp->mp_Size += (char *)mp->mp_Base - (char *)base;
			mp->mp_Used += (char *)mp->mp_Base - (char *)base;
			mp->mp_Base = base;
		}
		base = (char *)base + bytes;
		if (base > pend) {
			mp->mp_Size += (char *)base - (char *)pend;
			mp->mp_Used += (char *)base - (char *)pend;
			mp->mp_End = (char *)base;
		}
	}
}

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
	int abytes = 0;
	int hbytes = 0;
	int fcount = 0;
	MemNode *mn;

	printf("%d bytes reserved", (int)mp->mp_Size);

	mn = mp->mp_First;

	if ((void *)mn != (void *)mp->mp_Base) {
		abytes += (char *)mn - (char *)mp->mp_Base;
	}

	while (mn != NULL) {
		if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
			hbytes += mn->mr_Bytes;
			++fcount;
		}
		if (mn->mr_Next != NULL) {
			abytes += (char *)mn->mr_Next -
			    ((char *)mn + mn->mr_Bytes);
		}
		mn = mn->mr_Next;
	}
	printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	    abytes, fcount, hbytes);
}

#endif