1#ifndef JEMALLOC_INTERNAL_H
2#define	JEMALLOC_INTERNAL_H
3
4#include "jemalloc_internal_defs.h"
5#include "jemalloc/internal/jemalloc_internal_decls.h"
6
7#ifdef JEMALLOC_UTRACE
8#include <sys/ktrace.h>
9#endif
10
11#define	JEMALLOC_NO_DEMANGLE
12#ifdef JEMALLOC_JET
13#  define JEMALLOC_N(n) jet_##n
14#  include "jemalloc/internal/public_namespace.h"
15#  define JEMALLOC_NO_RENAME
16#  include "../jemalloc@[email protected]"
17#  undef JEMALLOC_NO_RENAME
18#else
19#  define JEMALLOC_N(n) @private_namespace@##n
20#  include "../jemalloc@[email protected]"
21#endif
22#include "jemalloc/internal/private_namespace.h"
23
/*
 * Build-time configuration, materialized as C constants so that optional
 * features can be guarded by ordinary if-statements rather than #ifdef
 * blocks; the optimizer eliminates the dead branches.  Each constant is true
 * iff the corresponding JEMALLOC_* macro was defined at configure time.
 */
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
/* Whether sbrk()/DSS-based allocation is available. */
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
/* Support for junk/zero filling (and quarantine/redzone options). */
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
/* NOTE(review): presumably defers locking until multi-threaded — confirm. */
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
/* Heap profiling support. */
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
/* Profiling backtraces via libgcc. */
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
/* Profiling backtraces via libunwind. */
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
/* NOTE(review): presumably whether adjacent VM mappings coalesce — confirm. */
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
/* Whether unused memory is returned to the OS via munmap(). */
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
/* Statistics collection support. */
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
/* Thread-specific caching (tcache) support. */
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
/* Whether compiler-level TLS is usable. */
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
/* Allocation tracing via utrace(2) (see the <sys/ktrace.h> include above). */
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
/* Valgrind integration support. */
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
/* Support for the abort-on-OOM (xmalloc) option. */
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
/* Whether ivsalloc() pointer validation is compiled in. */
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
/* NOTE(review): presumably randomizes large-run offsets for cache behavior. */
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;
143
144#ifdef JEMALLOC_C11ATOMICS
145#include <stdatomic.h>
146#endif
147
148#ifdef JEMALLOC_ATOMIC9
149#include <machine/atomic.h>
150#endif
151
152#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
153#include <libkern/OSAtomic.h>
154#endif
155
156#ifdef JEMALLOC_ZONE
157#include <mach/mach_error.h>
158#include <mach/mach_init.h>
159#include <mach/vm_map.h>
160#include <malloc/malloc.h>
161#endif
162
163#define	RB_COMPACT
164#include "jemalloc/internal/rb.h"
165#include "jemalloc/internal/qr.h"
166#include "jemalloc/internal/ql.h"
167
168/*
169 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
170 * but there are circular dependencies that cannot be broken without
171 * substantial performance degradation.  In order to reduce the effect on
172 * visual code flow, read the header files in multiple passes, with one of the
173 * following cpp variables defined during each pass:
174 *
175 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and psuedo-opaque data
176 *                        types.
177 *   JEMALLOC_H_STRUCTS : Data structures.
178 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
179 *   JEMALLOC_H_INLINES : Inline functions.
180 */
181/******************************************************************************/
182#define	JEMALLOC_H_TYPES
183
184#include "jemalloc/internal/jemalloc_internal_macros.h"
185
/* Size class index type. */
typedef unsigned szind_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define	MALLOCX_ARENA_MASK	((int)~0xfffff)
#define	MALLOCX_ARENA_MAX	0xffe
#define	MALLOCX_TCACHE_MASK	((int)~0xfff000ffU)
#define	MALLOCX_TCACHE_MAX	0xffd
#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
/* Masking with SIZE_T_MAX-1 maps a decoded alignment of 1 (lg 0) to 0. */
#define	MALLOCX_ALIGN_GET(flags)					\
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define	MALLOCX_ZERO_GET(flags)						\
    ((bool)(flags & MALLOCX_ZERO))

/*
 * The -2/-1 corrections undo the bias applied when the tcache/arena fields
 * are encoded (presumably by the public MALLOCX_TCACHE()/MALLOCX_ARENA()
 * macros — verify against jemalloc.h).
 */
#define	MALLOCX_TCACHE_GET(flags)					\
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define	MALLOCX_ARENA_GET(flags)					\
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
217
/* Smallest size class to support. */
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 *
 * If the configure script did not supply LG_QUANTUM, fall back to a
 * per-architecture default below.  Multiple #if blocks (rather than an
 * #elif chain) are harmless because each architecture matches one case.
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__sparc64__) || defined(__sparcv9))
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "Unknown minimum alignment for architecture; specify via "
	 "--with-lg-quantum"
#  endif
#endif
276
#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/*
 * Page size.  LG_PAGE is determined by the configure script.  Undefine any
 * system-provided PAGE_MASK first to avoid a redefinition clash.
 */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE		((size_t)(1U << LG_PAGE))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/*
 * The mask-based alignment macros below require alignment to be a power of
 * two (so that -(alignment) is the inverse mask).
 */

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))

/* Declare a variable-length array (alloca() fallback for pre-C99). */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
353
354#include "jemalloc/internal/valgrind.h"
355#include "jemalloc/internal/util.h"
356#include "jemalloc/internal/atomic.h"
357#include "jemalloc/internal/prng.h"
358#include "jemalloc/internal/ckh.h"
359#include "jemalloc/internal/size_classes.h"
360#include "jemalloc/internal/stats.h"
361#include "jemalloc/internal/ctl.h"
362#include "jemalloc/internal/mutex.h"
363#include "jemalloc/internal/tsd.h"
364#include "jemalloc/internal/mb.h"
365#include "jemalloc/internal/extent.h"
366#include "jemalloc/internal/arena.h"
367#include "jemalloc/internal/bitmap.h"
368#include "jemalloc/internal/base.h"
369#include "jemalloc/internal/rtree.h"
370#include "jemalloc/internal/pages.h"
371#include "jemalloc/internal/chunk.h"
372#include "jemalloc/internal/huge.h"
373#include "jemalloc/internal/tcache.h"
374#include "jemalloc/internal/hash.h"
375#include "jemalloc/internal/quarantine.h"
376#include "jemalloc/internal/prof.h"
377
378#undef JEMALLOC_H_TYPES
379/******************************************************************************/
380#define	JEMALLOC_H_STRUCTS
381
382#include "jemalloc/internal/valgrind.h"
383#include "jemalloc/internal/util.h"
384#include "jemalloc/internal/atomic.h"
385#include "jemalloc/internal/prng.h"
386#include "jemalloc/internal/ckh.h"
387#include "jemalloc/internal/size_classes.h"
388#include "jemalloc/internal/stats.h"
389#include "jemalloc/internal/ctl.h"
390#include "jemalloc/internal/mutex.h"
391#include "jemalloc/internal/mb.h"
392#include "jemalloc/internal/bitmap.h"
393#define	JEMALLOC_ARENA_STRUCTS_A
394#include "jemalloc/internal/arena.h"
395#undef JEMALLOC_ARENA_STRUCTS_A
396#include "jemalloc/internal/extent.h"
397#define	JEMALLOC_ARENA_STRUCTS_B
398#include "jemalloc/internal/arena.h"
399#undef JEMALLOC_ARENA_STRUCTS_B
400#include "jemalloc/internal/base.h"
401#include "jemalloc/internal/rtree.h"
402#include "jemalloc/internal/pages.h"
403#include "jemalloc/internal/chunk.h"
404#include "jemalloc/internal/huge.h"
405#include "jemalloc/internal/tcache.h"
406#include "jemalloc/internal/hash.h"
407#include "jemalloc/internal/quarantine.h"
408#include "jemalloc/internal/prof.h"
409
410#include "jemalloc/internal/tsd.h"
411
412#undef JEMALLOC_H_STRUCTS
413/******************************************************************************/
414#define	JEMALLOC_H_EXTERNS
415
/* Tunable runtime options (the opt_* family). */
extern bool	opt_abort;
extern const char	*opt_junk;
extern bool	opt_junk_alloc;
extern bool	opt_junk_free;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* NOTE(review): presumably set when running under Valgrind — confirm. */
extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned		ncpus;

/*
 * index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by index2size_compute().
 */
extern size_t const	index2size_tab[NSIZES];
/*
 * size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via size2index().
 */
extern uint8_t const	size2index_tab[];

/* Arena 0 / bootstrap allocation entry points. */
arena_t	*a0get(void);
void	*a0malloc(size_t size);
void	a0dalloc(void *ptr);
void	*bootstrap_malloc(size_t size);
void	*bootstrap_calloc(size_t num, size_t size);
void	bootstrap_free(void *ptr);
/* Arena creation, lookup, and selection. */
arena_t	*arenas_extend(unsigned ind);
arena_t	*arena_init(unsigned ind);
unsigned	narenas_total_get(void);
arena_t	*arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
arena_t	*arena_choose_hard(tsd_t *tsd);
void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
unsigned	arena_nbound(unsigned ind);
/* Thread-specific-data cleanup hooks. */
void	thread_allocated_cleanup(tsd_t *tsd);
void	thread_deallocated_cleanup(tsd_t *tsd);
void	arena_cleanup(tsd_t *tsd);
void	arenas_cache_cleanup(tsd_t *tsd);
void	narenas_cache_cleanup(tsd_t *tsd);
void	arenas_cache_bypass_cleanup(tsd_t *tsd);
/* Fork preparation/completion hooks. */
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);
466
467#include "jemalloc/internal/valgrind.h"
468#include "jemalloc/internal/util.h"
469#include "jemalloc/internal/atomic.h"
470#include "jemalloc/internal/prng.h"
471#include "jemalloc/internal/ckh.h"
472#include "jemalloc/internal/size_classes.h"
473#include "jemalloc/internal/stats.h"
474#include "jemalloc/internal/ctl.h"
475#include "jemalloc/internal/mutex.h"
476#include "jemalloc/internal/mb.h"
477#include "jemalloc/internal/bitmap.h"
478#include "jemalloc/internal/extent.h"
479#include "jemalloc/internal/arena.h"
480#include "jemalloc/internal/base.h"
481#include "jemalloc/internal/rtree.h"
482#include "jemalloc/internal/pages.h"
483#include "jemalloc/internal/chunk.h"
484#include "jemalloc/internal/huge.h"
485#include "jemalloc/internal/tcache.h"
486#include "jemalloc/internal/hash.h"
487#include "jemalloc/internal/quarantine.h"
488#include "jemalloc/internal/prof.h"
489#include "jemalloc/internal/tsd.h"
490
491#undef JEMALLOC_H_EXTERNS
492/******************************************************************************/
493#define	JEMALLOC_H_INLINES
494
495#include "jemalloc/internal/valgrind.h"
496#include "jemalloc/internal/util.h"
497#include "jemalloc/internal/atomic.h"
498#include "jemalloc/internal/prng.h"
499#include "jemalloc/internal/ckh.h"
500#include "jemalloc/internal/size_classes.h"
501#include "jemalloc/internal/stats.h"
502#include "jemalloc/internal/ctl.h"
503#include "jemalloc/internal/mutex.h"
504#include "jemalloc/internal/tsd.h"
505#include "jemalloc/internal/mb.h"
506#include "jemalloc/internal/extent.h"
507#include "jemalloc/internal/base.h"
508#include "jemalloc/internal/rtree.h"
509#include "jemalloc/internal/pages.h"
510#include "jemalloc/internal/chunk.h"
511#include "jemalloc/internal/huge.h"
512
/* Prototypes for builds where inlining is disabled; definitions follow. */
#ifndef JEMALLOC_ENABLE_INLINE
szind_t	size2index_compute(size_t size);
szind_t	size2index_lookup(size_t size);
szind_t	size2index(size_t size);
size_t	index2size_compute(szind_t index);
size_t	index2size_lookup(szind_t index);
size_t	index2size(szind_t index);
size_t	s2u_compute(size_t size);
size_t	s2u_lookup(size_t size);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
arena_t	*arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
    bool refresh_if_missing);
#endif
528
529#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Compute the size class index for a request of the given size, from first
 * principles (no lookup table).  size2index() is the fast-path wrapper.
 */
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{

#if (NTBINS != 0)
	/* Tiny size classes: one class per power of two below the quantum. */
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	}
#endif
	{
		/*
		 * x is lg_floor((size<<1)-1), i.e. the lg of the smallest
		 * power of two >= size; the unlikely branch saturates x for
		 * huge sizes where size<<1 would overflow.
		 */
		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		/* grp: index of the size class group (2^LG_SIZE_CLASS_GROUP
		 * classes per doubling), relative to the first group. */
		size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		size_t grp = shift << LG_SIZE_CLASS_GROUP;

		/* lg_delta: lg of the spacing between classes in the group. */
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

		/* mod: position of size within its group (rounded up). */
		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

		size_t index = NTBINS + grp + mod;
		return (index);
	}
}
560
561JEMALLOC_ALWAYS_INLINE szind_t
562size2index_lookup(size_t size)
563{
564
565	assert(size <= LOOKUP_MAXCLASS);
566	{
567		size_t ret = ((size_t)(size2index_tab[(size-1) >>
568		    LG_TINY_MIN]));
569		assert(ret == size2index_compute(size));
570		return (ret);
571	}
572}
573
574JEMALLOC_ALWAYS_INLINE szind_t
575size2index(size_t size)
576{
577
578	assert(size > 0);
579	if (likely(size <= LOOKUP_MAXCLASS))
580		return (size2index_lookup(size));
581	return (size2index_compute(size));
582}
583
/*
 * Compute the usable size for a size class index, from first principles
 * (inverse of size2index_compute()).
 */
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{

#if (NTBINS > 0)
	/* Tiny classes are pure powers of two. */
	if (index < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
	{
		/* Split index into group number and position within group. */
		size_t reduced_index = index - NTBINS;
		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);

		/* grp_size_mask is 0 when grp == 0, all ones otherwise. */
		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

		/* Spacing between classes doubles with each group. */
		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;

		size_t usize = grp_size + mod_size;
		return (usize);
	}
}
610
611JEMALLOC_ALWAYS_INLINE size_t
612index2size_lookup(szind_t index)
613{
614	size_t ret = (size_t)index2size_tab[index];
615	assert(ret == index2size_compute(index));
616	return (ret);
617}
618
619JEMALLOC_ALWAYS_INLINE size_t
620index2size(szind_t index)
621{
622
623	assert(index < NSIZES);
624	return (index2size_lookup(index));
625}
626
/*
 * Compute the usable size that results from a request of the given size,
 * from first principles (no lookup table); s2u() is the fast-path wrapper.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

#if (NTBINS > 0)
	/* Tiny classes: round up to a power of two, clamped to the minimum. */
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	}
#endif
	{
		/*
		 * As in size2index_compute(): x is the lg of the smallest
		 * power of two >= size, saturated for huge sizes.
		 */
		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		/* Round size up to the class spacing within its group. */
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}
651
652JEMALLOC_ALWAYS_INLINE size_t
653s2u_lookup(size_t size)
654{
655	size_t ret = index2size_lookup(size2index_lookup(size));
656
657	assert(ret == s2u_compute(size));
658	return (ret);
659}
660
661/*
662 * Compute usable size that would result from allocating an object with the
663 * specified size.
664 */
665JEMALLOC_ALWAYS_INLINE size_t
666s2u(size_t size)
667{
668
669	assert(size > 0);
670	if (likely(size <= LOOKUP_MAXCLASS))
671		return (s2u_lookup(size));
672	return (s2u_compute(size));
673}
674
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.  alignment must be a non-zero power of two.
 * Returns 0 on size_t overflow.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/* Try for a small size class. */
	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
		/*
		 * Round size up to the nearest multiple of alignment.
		 *
		 * This done, we can take advantage of the fact that for each
		 * small size class, every object is aligned at the smallest
		 * power of two that is non-zero in the base two representation
		 * of the size.  For example:
		 *
		 *   Size |   Base 2 | Minimum alignment
		 *   -----+----------+------------------
		 *     96 |  1100000 |  32
		 *    144 | 10100000 |  32
		 *    192 | 11000000 |  64
		 */
		usize = s2u(ALIGNMENT_CEILING(size, alignment));
		/* Fall through to the large path if rounding overshot. */
		if (usize < LARGE_MINCLASS)
			return (usize);
	}

	/* Try for a large size class. */
	if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * to the minimum that can actually be supported.
		 */
		alignment = PAGE_CEILING(alignment);

		/* Make sure result is a large size class. */
		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
			return (usize);
	}

	/* Huge size class.  Beware of size_t overflow. */

	/*
	 * We can't achieve subchunk alignment, so round up alignment to the
	 * minimum that can actually be supported.
	 */
	alignment = CHUNK_CEILING(alignment);
	if (alignment == 0) {
		/* size_t overflow. */
		return (0);
	}

	/* Make sure result is a huge size class. */
	if (size <= chunksize)
		usize = chunksize;
	else {
		usize = s2u(size);
		if (usize < size) {
			/* size_t overflow. */
			return (0);
		}
	}

	/*
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
	return (usize);
}
759
760/* Choose an arena based on a per-thread value. */
761JEMALLOC_INLINE arena_t *
762arena_choose(tsd_t *tsd, arena_t *arena)
763{
764	arena_t *ret;
765
766	if (arena != NULL)
767		return (arena);
768
769	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
770		ret = arena_choose_hard(tsd);
771
772	return (ret);
773}
774
/*
 * Look up arena ind via the thread's arenas cache, falling back to
 * arena_get_hard() when the cache is missing, stale, or the slot is empty.
 * May return NULL only when refresh_if_missing is false.
 */
JEMALLOC_INLINE arena_t *
arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
    bool refresh_if_missing)
{
	arena_t *arena;
	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);

	/* init_if_missing requires refresh_if_missing. */
	assert(!init_if_missing || refresh_if_missing);

	if (unlikely(arenas_cache == NULL)) {
		/* arenas_cache hasn't been initialized yet. */
		return (arena_get_hard(tsd, ind, init_if_missing));
	}
	if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
		/*
		 * ind is invalid, cache is old (too small), or arena to be
		 * initialized.
		 */
		return (refresh_if_missing ? arena_get_hard(tsd, ind,
		    init_if_missing) : NULL);
	}
	arena = arenas_cache[ind];
	if (likely(arena != NULL) || !refresh_if_missing)
		return (arena);
	/* Cached slot is empty; refresh it. */
	return (arena_get_hard(tsd, ind, init_if_missing));
}
802#endif
803
804#include "jemalloc/internal/bitmap.h"
805/*
806 * Include portions of arena.h interleaved with tcache.h in order to resolve
807 * circular dependencies.
808 */
809#define	JEMALLOC_ARENA_INLINE_A
810#include "jemalloc/internal/arena.h"
811#undef JEMALLOC_ARENA_INLINE_A
812#include "jemalloc/internal/tcache.h"
813#define	JEMALLOC_ARENA_INLINE_B
814#include "jemalloc/internal/arena.h"
815#undef JEMALLOC_ARENA_INLINE_B
816#include "jemalloc/internal/hash.h"
817#include "jemalloc/internal/quarantine.h"
818
/* Prototypes for builds where inlining is disabled; definitions follow. */
#ifndef JEMALLOC_ENABLE_INLINE
arena_t	*iaalloc(const void *ptr);
size_t	isalloc(const void *ptr, bool demote);
void	*iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena);
void	*imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void	*imalloc(tsd_t *tsd, size_t size);
void	*icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void	*icalloc(tsd_t *tsd, size_t size);
void	*ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena);
void	*ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena);
void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
void	idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void	idalloc(tsd_t *tsd, void *ptr);
void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void	isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena);
void	*iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void	*iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero);
bool	ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero);
#endif
852
853#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
854JEMALLOC_ALWAYS_INLINE arena_t *
855iaalloc(const void *ptr)
856{
857
858	assert(ptr != NULL);
859
860	return (arena_aalloc(ptr));
861}
862
863/*
864 * Typical usage:
865 *   void *ptr = [...]
866 *   size_t sz = isalloc(ptr, config_prof);
867 */
868JEMALLOC_ALWAYS_INLINE size_t
869isalloc(const void *ptr, bool demote)
870{
871
872	assert(ptr != NULL);
873	/* Demotion only makes sense if config_prof is true. */
874	assert(config_prof || !demote);
875
876	return (arena_salloc(ptr, demote));
877}
878
/*
 * Core internal allocation entry point: allocate size bytes from arena via
 * tcache, optionally zeroed; when is_metadata, account the allocation in the
 * owning arena's metadata statistics.  Returns NULL on failure.
 */
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
    arena_t *arena)
{
	void *ret;

	assert(size != 0);

	ret = arena_malloc(tsd, arena, size, zero, tcache);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}
894
895JEMALLOC_ALWAYS_INLINE void *
896imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
897{
898
899	return (iallocztm(tsd, size, false, tcache, false, arena));
900}
901
902JEMALLOC_ALWAYS_INLINE void *
903imalloc(tsd_t *tsd, size_t size)
904{
905
906	return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
907}
908
909JEMALLOC_ALWAYS_INLINE void *
910icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
911{
912
913	return (iallocztm(tsd, size, true, tcache, false, arena));
914}
915
916JEMALLOC_ALWAYS_INLINE void *
917icalloc(tsd_t *tsd, size_t size)
918{
919
920	return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
921}
922
/*
 * Core aligned-allocation entry point.  usize must already be the exact
 * usable size for (size, alignment), i.e. usize == sa2u(usize, alignment).
 * When is_metadata, account the allocation in arena metadata statistics.
 */
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
	/* The returned address must honor the requested alignment. */
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}
940
941JEMALLOC_ALWAYS_INLINE void *
942ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
943    tcache_t *tcache, arena_t *arena)
944{
945
946	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
947}
948
949JEMALLOC_ALWAYS_INLINE void *
950ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
951{
952
953	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
954	    NULL), false, NULL));
955}
956
/*
 * Validating variant of isalloc(): first confirm via the chunk radix tree
 * that ptr belongs to a jemalloc-managed chunk, returning 0 otherwise.
 */
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
	extent_node_t *node;

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	node = chunk_lookup(ptr, false);
	if (node == NULL)
		return (0);
	/* Only arena chunks should be looked up via interior pointers. */
	assert(extent_node_addr_get(node) == ptr ||
	    extent_node_achunk_get(node));

	return (isalloc(ptr, demote));
}
972
973JEMALLOC_INLINE size_t
974u2rz(size_t usize)
975{
976	size_t ret;
977
978	if (usize <= SMALL_MAXCLASS) {
979		szind_t binind = size2index(usize);
980		ret = arena_bin_info[binind].redzone_size;
981	} else
982		ret = 0;
983
984	return (ret);
985}
986
987JEMALLOC_INLINE size_t
988p2rz(const void *ptr)
989{
990	size_t usize = isalloc(ptr, false);
991
992	return (u2rz(usize));
993}
994
/*
 * Core internal deallocation entry point; when is_metadata, deduct the
 * allocation from the owning arena's metadata statistics before freeing.
 */
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
{

	assert(ptr != NULL);
	if (config_stats && is_metadata) {
		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
		    config_prof));
	}

	arena_dalloc(tsd, ptr, tcache);
}
1007
1008JEMALLOC_ALWAYS_INLINE void
1009idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
1010{
1011
1012	idalloctm(tsd, ptr, tcache, false);
1013}
1014
1015JEMALLOC_ALWAYS_INLINE void
1016idalloc(tsd_t *tsd, void *ptr)
1017{
1018
1019	idalloctm(tsd, ptr, tcache_get(tsd, false), false);
1020}
1021
1022JEMALLOC_ALWAYS_INLINE void
1023iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
1024{
1025
1026	if (config_fill && unlikely(opt_quarantine))
1027		quarantine(tsd, ptr);
1028	else
1029		idalloctm(tsd, ptr, tcache, false);
1030}
1031
1032JEMALLOC_ALWAYS_INLINE void
1033isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
1034{
1035
1036	arena_sdalloc(tsd, ptr, size, tcache);
1037}
1038
1039JEMALLOC_ALWAYS_INLINE void
1040isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
1041{
1042
1043	if (config_fill && unlikely(opt_quarantine))
1044		quarantine(tsd, ptr);
1045	else
1046		isdalloct(tsd, ptr, size, tcache);
1047}
1048
/*
 * Reallocation fallback for when the existing object's alignment is
 * inadequate: allocate fresh aligned space (preferring size+extra, retrying
 * with just size), copy, then free the old object.  Returns NULL on failure,
 * leaving the original allocation intact.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	/* sa2u() returns 0 on size_t overflow. */
	usize = sa2u(size + extra, alignment);
	if (usize == 0)
		return (NULL);
	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (usize == 0)
			return (NULL);
		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	/* Old object goes through the quarantine-aware sized free path. */
	isqalloc(tsd, ptr, oldsize, tcache);
	return (p);
}
1080
/*
 * Reallocate ptr (currently oldsize bytes) to size bytes with the given
 * alignment.  In-place resize is attempted via arena_ralloc() unless the
 * existing address cannot satisfy the alignment, in which case a
 * copy-based move is performed.  An alignment of 0 means "no requirement".
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
		    zero, tcache, arena));
	}

	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
	    tcache));
}
1102
1103JEMALLOC_ALWAYS_INLINE void *
1104iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
1105    bool zero)
1106{
1107
1108	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
1109	    tcache_get(tsd, true), NULL));
1110}
1111
1112JEMALLOC_ALWAYS_INLINE bool
1113ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
1114    bool zero)
1115{
1116
1117	assert(ptr != NULL);
1118	assert(size != 0);
1119
1120	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
1121	    != 0) {
1122		/* Existing object alignment is inadequate. */
1123		return (true);
1124	}
1125
1126	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
1127}
1128#endif
1129
1130#include "jemalloc/internal/prof.h"
1131
1132#undef JEMALLOC_H_INLINES
1133/******************************************************************************/
1134#endif /* JEMALLOC_INTERNAL_H */
1135