/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator. It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size. Blocks that
 * don't exactly fit are passed up to the next larger size. In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#ifdef IN_RTLD
#include "rtld.h"
#include "rtld_printf.h"
#include "rtld_paths.h"
#endif
#include "rtld_malloc.h"

/*
 * Pre-allocate mmap'ed pages.
 */
#define	NPOOLPAGES	(128*1024/pagesz)
static caddr_t pagepool_start, pagepool_end;
/*
 * The overhead on a block is at least the size of a pointer. When free,
 * this space contains a pointer to the next free block, and the bottom two
 * bits must be zero. When in use, the first two bytes hold the bucket
 * (size) index and the following byte is set to MAGIC. The remaining
 * bytes are for alignment.
 */
union overhead {
	union overhead *ov_next;	/* when free */
	struct {
		uint16_t ovu_index;	/* bucket # */
		uint8_t	 ovu_magic;	/* magic number */
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
};

static void morecore(int bucket);
static int morepages(int n);

#define	MAGIC		0xef	/* magic # on accounting info */
#define	AMAGIC		0xdf	/* magic # for aligned alloc */

/*
 * nextf[i] is the pointer to the next free block of size
 * (FIRST_BUCKET_SIZE << i). The overhead information precedes the data
 * area returned to the user.
 */
#define	LOW_BITS		3
#define	FIRST_BUCKET_SIZE	(1U << LOW_BITS)
#define	NBUCKETS		30
static union overhead *nextf[NBUCKETS];

static int pagesz;		/* page size */

/*
 * The array of supported page sizes is provided by the user, i.e., the
 * program that calls this storage allocator. That program must initialize
 * the array before making its first call to allocate storage. The array
 * must contain at least one page size. The page sizes must be stored in
 * increasing order.
 */

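/*
 * Convert a pointer to a block's data area back to the address of the
 * union overhead header that immediately precedes it.
 */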
static void *
cp2op(void *cp)
{
	return (((caddr_t)cp - sizeof(union overhead)));
}

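/*
 * Allocate nbytes of storage. The request is rounded up to the smallest
 * bucket whose block size, minus the overhead header, can hold it; the
 * block is taken from that bucket's free list, refilling the list from
 * the page pool if necessary. Returns NULL on failure.
 */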
void *
__crt_malloc(size_t nbytes)
{
	union overhead *op;
	int bucket;
	size_t amt;

	/*
	 * The first time malloc is called, set up the page size.
	 */
	if (pagesz == 0)
		pagesz = pagesizes[0];
	/*
	 * Convert the requested amount of memory into the closest block
	 * size kept in the hash buckets which satisfies the request,
	 * accounting for the per-block bookkeeping space.
	 */
	amt = FIRST_BUCKET_SIZE;
	bucket = 0;
	while (nbytes > amt - sizeof(*op)) {
		amt <<= 1;
		bucket++;
		if (amt == 0 || bucket >= NBUCKETS)
			return (NULL);
	}
	/*
	 * If nothing is in the hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
	return ((char *)(op + 1));
}

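/*
 * Allocate zeroed storage for an array of num elements of the given size,
 * rejecting requests whose total size overflows a size_t.
 */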
void *
__crt_calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		return (NULL);
	}

	if ((ret = __crt_malloc(num * size)) != NULL)
		memset(ret, 0, num * size);

	return (ret);
}

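/*
 * Allocate storage whose address, plus the given offset, is a multiple of
 * align. The block is over-allocated with __crt_malloc, the returned
 * pointer is rounded up to the requested alignment, and a synthetic
 * overhead header tagged with AMAGIC is stored just before it. That
 * header's ov_index records the distance from the returned pointer back
 * to the overhead header of the underlying allocation, so that __crt_free
 * can locate the real header.
 */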
void *
__crt_aligned_alloc_offset(size_t align, size_t size, size_t offset)
{
	void *mem, *ov;
	union overhead ov1;
	uintptr_t x;

	if (align < FIRST_BUCKET_SIZE)
		align = FIRST_BUCKET_SIZE;
	offset &= align - 1;
	mem = __crt_malloc(size + align + offset + sizeof(union overhead));
	if (mem == NULL)
		return (NULL);
	x = roundup2((uintptr_t)mem + sizeof(union overhead), align);
	x += offset;
	ov = cp2op((void *)x);
	ov1.ov_magic = AMAGIC;
	ov1.ov_index = x - (uintptr_t)mem + sizeof(union overhead);
	memcpy(ov, &ov1, sizeof(ov1));
	return ((void *)x);
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	union overhead *op;
	int sz;		/* size of desired block */
	int amt;	/* amount to allocate */
	int nblks;	/* how many blocks we get */

	sz = FIRST_BUCKET_SIZE << bucket;
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz;
		nblks = 1;
	}
	if (amt > pagepool_end - pagepool_start)
		if (morepages(amt / pagesz + NPOOLPAGES) == 0 &&
		    /* Retry with the minimum required size */
		    morepages(amt / pagesz) == 0)
			return;
	op = (union overhead *)pagepool_start;
	pagepool_start += amt;

	/*
	 * Add the newly allocated memory to the free list
	 * for this hash bucket.
	 */
	nextf[bucket] = op;
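	/*
	 * Chain the remaining blocks together. The last block's ov_next is
	 * not set explicitly; it lies in anonymous mmap'ed memory that has
	 * not been written to yet, so it reads as zero (NULL) and terminates
	 * the list.
	 */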
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
}

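/*
 * Return a block to its bucket's free list. Blocks produced by
 * __crt_aligned_alloc_offset carry an AMAGIC header, which is used to step
 * back to the overhead header of the underlying allocation; blocks whose
 * header does not carry MAGIC are silently ignored.
 */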
void
__crt_free(void *cp)
{
	union overhead *op, op1;
	void *opx;
	int size;

	if (cp == NULL)
		return;
	opx = cp2op(cp);
	memcpy(&op1, opx, sizeof(op1));
	op = op1.ov_magic == AMAGIC ? (void *)((caddr_t)cp - op1.ov_index) :
	    opx;
	if (op->ov_magic != MAGIC)
		return;			/* sanity */
	size = op->ov_index;
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
}

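/*
 * Resize the block at cp to nbytes. If the request still fits the block's
 * current bucket (and would not fit the next smaller one), the block is
 * returned unchanged; otherwise a new block is allocated, the old contents
 * are copied over, and the old block is freed.
 */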
void *
__crt_realloc(void *cp, size_t nbytes)
{
	u_int onb;
	int i;
	union overhead *op;
	char *res;

	if (cp == NULL)
		return (__crt_malloc(nbytes));
	op = cp2op(cp);
	if (op->ov_magic != MAGIC)
		return (NULL);	/* Double-free or bad argument */
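	/*
	 * Estimate the usable size of the current block (onb) and of the
	 * next smaller bucket (i).
	 */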
	i = op->ov_index;
	onb = 1 << (i + 3);
	if (onb < (u_int)pagesz)
		onb -= sizeof(*op);
	else
		onb += pagesz - sizeof(*op);
	/* avoid the copy if same size block */
	if (i != 0) {
		i = 1 << (i + 2);
		if (i < pagesz)
			i -= sizeof(*op);
		else
			i += pagesz - sizeof(*op);
	}
	if (nbytes <= onb && nbytes > (size_t)i)
		return (cp);
	if ((res = __crt_malloc(nbytes)) == NULL)
		return (NULL);
	bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
	__crt_free(cp);
	return (res);
}

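/*
 * Replenish the page pool with n pages of anonymous memory. Any whole
 * pages left in the old pool are unmapped first. Returns the number of
 * pages obtained, or 0 on failure.
 */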
static int
morepages(int n)
{
	caddr_t addr;
	int offset;

	if (pagepool_end - pagepool_start > pagesz) {
		addr = (caddr_t)roundup2((uintptr_t)pagepool_start, pagesz);
		if (munmap(addr, pagepool_end - addr) != 0) {
#ifdef IN_RTLD
			rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": "
			    "morepages: cannot munmap %p: %s\n",
			    addr, rtld_strerror(errno));
#endif
		}
	}

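	/*
	 * Preserve the old pool's sub-page offset so that the new
	 * pagepool_start keeps the same alignment within a page.
	 */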
	offset = (uintptr_t)pagepool_start - rounddown2(
	    (uintptr_t)pagepool_start, pagesz);

	addr = mmap(0, n * pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (addr == MAP_FAILED) {
#ifdef IN_RTLD
		rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": morepages: "
		    "cannot mmap anonymous memory: %s\n",
		    rtld_strerror(errno));
#endif
		pagepool_start = pagepool_end = NULL;
		return (0);
	}
	pagepool_start = addr;
	pagepool_end = pagepool_start + n * pagesz;
	pagepool_start += offset;

	return (n);
}