/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005, 2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)malloc.h	8.5 (Berkeley) 5/3/95
 * $FreeBSD$
 */

#ifndef _SYS_MALLOC_H_
#define	_SYS_MALLOC_H_

#ifndef _STANDALONE
#include <sys/param.h>
#ifdef _KERNEL
#include <sys/systm.h>
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/_limits.h>

#define	MINALLOCSIZE	UMA_SMALLEST_UNIT

/*
 * Flags to memory allocation functions.
 */
#define	M_NOWAIT	0x0001		/* do not block */
#define	M_WAITOK	0x0002		/* ok to block */
#define	M_ZERO		0x0100		/* bzero the allocation */
#define	M_NOVM		0x0200		/* don't ask VM for pages */
#define	M_USE_RESERVE	0x0400		/* can alloc out of reserve memory */
#define	M_NODUMP	0x0800		/* don't dump pages in this allocation */
#define	M_FIRSTFIT	0x1000		/* only for vmem, fast fit */
#define	M_BESTFIT	0x2000		/* only for vmem, low fragmentation */
#define	M_EXEC		0x4000		/* allocate executable space */
#define	M_NEXTFIT	0x8000		/* only for vmem, follow cursor */
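
/*
 * Illustrative sketch (not part of the original header; fp and np are
 * hypothetical pointers): M_WAITOK allocations may sleep and do not return
 * NULL, whereas M_NOWAIT allocations may fail and must be checked.
 *
 *	fp = malloc(sizeof(*fp), M_DEVBUF, M_WAITOK | M_ZERO);
 *	np = malloc(sizeof(*np), M_DEVBUF, M_NOWAIT);
 *	if (np == NULL)
 *		return (ENOMEM);
 */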

#define	M_VERSION	2020110501

/*
 * Two malloc type structures are present: malloc_type, which is used by a
 * type owner to declare the type, and malloc_type_internal, which holds
 * malloc-owned statistics and other ABI-sensitive fields, such as the set of
 * malloc statistics indexed by the compile-time MAXCPU constant.
 * Applications should avoid introducing dependence on the allocator's
 * private data layout and size.
 *
 * The malloc_type ks_next field is protected by malloc_mtx.  Other fields in
 * malloc_type are static after initialization, so they are accessed without
 * synchronization.
 *
 * Statistics in malloc_type_stats are written only while holding a critical
 * section and running on the CPU associated with the index into the stat
 * array, but are read lock-free, resulting in possible (minor) races that
 * the monitoring application should take into account.
 */
struct malloc_type_stats {
	uint64_t	mts_memalloced;	/* Bytes allocated on CPU. */
	uint64_t	mts_memfreed;	/* Bytes freed on CPU. */
	uint64_t	mts_numallocs;	/* Number of allocations on CPU. */
	uint64_t	mts_numfrees;	/* Number of frees on CPU. */
	uint64_t	mts_size;	/* Bitmask of sizes allocated on CPU. */
	uint64_t	_mts_reserved1;	/* Reserved field. */
	uint64_t	_mts_reserved2;	/* Reserved field. */
	uint64_t	_mts_reserved3;	/* Reserved field. */
};

_Static_assert(sizeof(struct malloc_type_stats) == 64,
    "allocations come from pcpu_zone_64");

/*
 * Index definitions for the mti_probes[] array.
 */
#define	DTMALLOC_PROBE_MALLOC	0
#define	DTMALLOC_PROBE_FREE	1
#define	DTMALLOC_PROBE_MAX	2

struct malloc_type_internal {
	uint32_t	mti_probes[DTMALLOC_PROBE_MAX];
					/* DTrace probe ID array. */
	u_char		mti_zone;
	struct malloc_type_stats	*mti_stats;
	u_long		mti_spare[8];
};

/*
 * Public data structure describing a malloc type.
 */
struct malloc_type {
	struct malloc_type *ks_next;	/* Next in global chain. */
	u_long		ks_version;	/* Detect programmer error. */
	const char	*ks_shortdesc;	/* Printable type name. */
	struct malloc_type_internal ks_mti;
};

/*
 * Statistics structure headers for user space.  The kern.malloc sysctl
 * exposes a structure stream consisting of a stream header, then a series of
 * malloc type headers and statistics structures (quantity maxcpus).  For
 * convenience, the kernel will provide the current value of maxcpus at the
 * head of the stream.
 */
#define	MALLOC_TYPE_STREAM_VERSION	0x00000001
struct malloc_type_stream_header {
	uint32_t	mtsh_version;	/* Stream format version. */
	uint32_t	mtsh_maxcpus;	/* Value of MAXCPU for stream. */
	uint32_t	mtsh_count;	/* Number of records. */
	uint32_t	_mtsh_pad;	/* Pad/reserved field. */
};

#define	MALLOC_MAX_NAME	32
struct malloc_type_header {
	char		mth_name[MALLOC_MAX_NAME];
};
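
/*
 * Illustrative user-space sketch (not part of this header) of walking the
 * stream described above, assuming it has been copied into "buf" (e.g. via
 * the kern.malloc_stats sysctl consumed by libmemstat); error and bounds
 * checking are omitted:
 *
 *	struct malloc_type_stream_header *hdr = (void *)buf;
 *	char *p = buf + sizeof(*hdr);
 *	for (i = 0; i < hdr->mtsh_count; i++) {
 *		struct malloc_type_header *mth = (void *)p;
 *		p += sizeof(*mth);
 *		for (cpu = 0; cpu < hdr->mtsh_maxcpus; cpu++) {
 *			struct malloc_type_stats *mts = (void *)p;
 *			p += sizeof(*mts);
 *			... accumulate mts->mts_memalloced, etc., for mth->mth_name ...
 *		}
 *	}
 */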

#ifdef _KERNEL
#define	MALLOC_DEFINE(type, shortdesc, longdesc)			\
	struct malloc_type type[1] = {					\
		{							\
			.ks_next = NULL,				\
			.ks_version = M_VERSION,			\
			.ks_shortdesc = shortdesc,			\
		}							\
	};								\
	SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init,	\
	    type);							\
	SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY,		\
	    malloc_uninit, type)

#define	MALLOC_DECLARE(type) \
	extern struct malloc_type type[1]

MALLOC_DECLARE(M_CACHE);
MALLOC_DECLARE(M_DEVBUF);
MALLOC_DECLARE(M_TEMP);
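
/*
 * Example usage (illustrative; M_FROBDATA and struct frob are hypothetical).
 * A type is declared in a shared header and defined exactly once in a .c
 * file, after which it can be passed to malloc(9) and free(9):
 *
 *	MALLOC_DECLARE(M_FROBDATA);					(header)
 *	MALLOC_DEFINE(M_FROBDATA, "frobdata", "frob driver data");	(.c file)
 *
 *	fp = malloc(sizeof(struct frob), M_FROBDATA, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_FROBDATA);
 */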

/*
 * XXX this should be declared in <sys/uio.h>, but that tends to fail
 * because <sys/uio.h> is included in a header before the source file
 * has a chance to include <sys/malloc.h> to get MALLOC_DECLARE() defined.
 */
MALLOC_DECLARE(M_IOV);

struct domainset;
extern struct mtx malloc_mtx;

/*
 * Function type used when iterating over the list of malloc types.
 */
typedef void malloc_type_list_func_t(struct malloc_type *, void *);
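
/*
 * Illustrative sketch (hypothetical identifiers): a callback of this type
 * can be passed to malloc_type_list(), declared below, which invokes it
 * once per registered malloc type:
 *
 *	static void
 *	count_type(struct malloc_type *mtp, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	malloc_type_list(count_type, &n);
 */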

void	contigfree(void *addr, unsigned long size, struct malloc_type *type);
void	*contigmalloc(unsigned long size, struct malloc_type *type, int flags,
	    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
	    vm_paddr_t boundary) __malloc_like __result_use_check
	    __alloc_size(1) __alloc_align(6);
void	*contigmalloc_domainset(unsigned long size, struct malloc_type *type,
	    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
	    unsigned long alignment, vm_paddr_t boundary)
	    __malloc_like __result_use_check __alloc_size(1) __alloc_align(7);
void	free(void *addr, struct malloc_type *type);
void	zfree(void *addr, struct malloc_type *type);
void	*malloc(size_t size, struct malloc_type *type, int flags) __malloc_like
	    __result_use_check __alloc_size(1);
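
/*
 * Illustrative sketch (hypothetical buffer and size): allocate a physically
 * contiguous, page-aligned 64KB buffer whose physical addresses stay below
 * 4GB, e.g. for a device that can only address 32 bits, and release it
 * again with contigfree():
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *	    PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */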

#ifndef FSTACK
/*
 * Try to optimize malloc(..., ..., M_ZERO) allocations by doing zeroing in
 * place if the size is known at compilation time.
 *
 * Passing the flag down requires malloc to blindly zero the entire object.
 * In practice a lot of the zeroing can be avoided if most of the object
 * gets explicitly initialized after the allocation.  Letting the compiler
 * zero in place gives it the opportunity to take advantage of this state.
 *
 * Note that the operation is only applicable if both flags and size are
 * known at compilation time.  If M_ZERO is passed but M_WAITOK is not, the
 * allocation can fail and a NULL check is needed.  However, if M_WAITOK is
 * passed we know the allocation must succeed and the check can be elided.
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	if (((flags) & M_WAITOK) != 0 || _malloc_item != NULL)
 *		bzero(_malloc_item, _size);
 *
 * If the flag is set, the compiler knows the left side is always true,
 * therefore the entire statement is true and the callsite is:
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	bzero(_malloc_item, _size);
 *
 * If the flag is not set, the compiler knows the left side is always false
 * and the NULL check is needed, therefore the callsite is:
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	if (_malloc_item != NULL)
 *		bzero(_malloc_item, _size);
 *
 * The implementation is a macro because of what appears to be a clang 6 bug:
 * an inline function variant ended up being compiled to a mere malloc call
 * regardless of argument.  gcc generates expected code (like the above).
 */
#define	malloc(size, type, flags) ({					\
	void *_malloc_item;						\
	size_t _size = (size);						\
	if (__builtin_constant_p(size) && __builtin_constant_p(flags) &&\
	    ((flags) & M_ZERO) != 0) {					\
		_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);	\
		if (((flags) & M_WAITOK) != 0 ||			\
		    __predict_true(_malloc_item != NULL))		\
			bzero(_malloc_item, _size);			\
	} else {							\
		_malloc_item = malloc(_size, type, flags);		\
	}								\
	_malloc_item;							\
})
#endif

void	*malloc_domainset(size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size(1);
void	*mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
	    int flags) __malloc_like __result_use_check
	    __alloc_size2(1, 2);
void	*malloc_exec(size_t size, struct malloc_type *type, int flags) __malloc_like
	    __result_use_check __alloc_size(1);
void	*malloc_domainset_exec(size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size(1);
void	malloc_init(void *);
void	malloc_type_allocated(struct malloc_type *type, unsigned long size);
void	malloc_type_freed(struct malloc_type *type, unsigned long size);
void	malloc_type_list(malloc_type_list_func_t *, void *);
void	malloc_uninit(void *);
size_t	malloc_size(size_t);
size_t	malloc_usable_size(const void *);
void	*realloc(void *addr, size_t size, struct malloc_type *type, int flags)
	    __result_use_check __alloc_size(2);
void	*reallocf(void *addr, size_t size, struct malloc_type *type, int flags)
	    __result_use_check __alloc_size(2);
void	*malloc_domainset_aligned(size_t size, size_t align,
	    struct malloc_type *mtp, struct domainset *ds, int flags)
	    __malloc_like __result_use_check __alloc_size(1);

struct malloc_type *malloc_desc2type(const char *desc);

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW.
 */
#define	MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 8 / 2))
static inline bool
WOULD_OVERFLOW(size_t nmemb, size_t size)
{

	return ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && __SIZE_T_MAX / nmemb < size);
}
#undef MUL_NO_OVERFLOW
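
/*
 * Illustrative sketch (hypothetical identifiers): on LP64, MUL_NO_OVERFLOW
 * is 2^32, so two factors each below it cannot overflow a 64-bit size_t;
 * only when a factor is at least 2^32 is the division check needed.
 * mallocarray() applies this check before multiplying, so an overflowing
 * element count is caught rather than silently wrapping the allocation size:
 *
 *	tbl = mallocarray(nelem, sizeof(*tbl), M_TEMP, M_WAITOK | M_ZERO);
 */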
#endif /* _KERNEL */

#else
/*
 * The native standalone malloc / free interface we're mapping to.
 */
extern void Free(void *p, const char *file, int line);
extern void *Malloc(size_t bytes, const char *file, int line);

/*
 * Minimal standalone malloc implementation / environment.  None of the
 * flags mean anything and there's no need to declare malloc types.
 * Define the simple alloc / free routines in terms of Malloc and
 * Free.  None of the kernel features that this stuff disables are needed.
 *
 * XXX we are setting ourselves up for a potential crash if we can't allocate
 * memory for an M_WAITOK call.
 */
#define	M_WAITOK	0
#define	M_ZERO		0
#define	M_NOWAIT	0
#define	MALLOC_DECLARE(x)

#define	kmem_zalloc(size, flags)	Malloc((size), __FILE__, __LINE__)
#define	kmem_free(p, size)		Free(p, __FILE__, __LINE__)

/*
 * ZFS mem.h define that's the OpenZFS porting layer way of saying
 * M_WAITOK.  Given the above, it will also be a nop.
 */
#define	KM_SLEEP	M_WAITOK
#endif /* _STANDALONE */
#endif /* !_SYS_MALLOC_H_ */