xref: /redis-3.2.3/deps/jemalloc/src/jemalloc.c (revision 5268379e)
1 #define	JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 /* Runtime configuration options. */
8 const char	*je_malloc_conf JEMALLOC_ATTR(weak);
9 bool	opt_abort =
10 #ifdef JEMALLOC_DEBUG
11     true
12 #else
13     false
14 #endif
15     ;
16 const char	*opt_junk =
17 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
18     "true"
19 #else
20     "false"
21 #endif
22     ;
23 bool	opt_junk_alloc =
24 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
25     true
26 #else
27     false
28 #endif
29     ;
30 bool	opt_junk_free =
31 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
32     true
33 #else
34     false
35 #endif
36     ;
37 
38 size_t	opt_quarantine = ZU(0);
39 bool	opt_redzone = false;
40 bool	opt_utrace = false;
41 bool	opt_xmalloc = false;
42 bool	opt_zero = false;
43 size_t	opt_narenas = 0;
44 
45 /* Initialized to true if the process is running inside Valgrind. */
46 bool	in_valgrind;
47 
48 unsigned	ncpus;
49 
50 /* Protects arenas initialization (arenas, narenas_total). */
51 static malloc_mutex_t	arenas_lock;
52 /*
53  * Arenas that are used to service external requests.  Not all elements of the
54  * arenas array are necessarily used; arenas are created lazily as needed.
55  *
56  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
57  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
58  * takes some action to create them and allocate from them.
59  */
60 static arena_t		**arenas;
61 static unsigned		narenas_total;
62 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
63 static unsigned		narenas_auto; /* Read-only after initialization. */
64 
65 typedef enum {
66 	malloc_init_uninitialized	= 3,
67 	malloc_init_a0_initialized	= 2,
68 	malloc_init_recursible		= 1,
69 	malloc_init_initialized		= 0 /* Common case --> jnz. */
70 } malloc_init_t;
71 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
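/*
 * Initialization walks these states in decreasing numeric order:
 * malloc_init_hard_a0_locked() moves uninitialized -> a0_initialized,
 * malloc_init_hard_recursible() moves to recursible while ncpus etc. are
 * probed (allocation may recurse at that point), and malloc_init_hard_finish()
 * moves to initialized.  Making the terminal state 0 lets the common-case
 * check in malloc_initialized() compile down to a comparison against zero.
 */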
72 
73 JEMALLOC_ALIGNED(CACHELINE)
74 const size_t	index2size_tab[NSIZES] = {
75 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
76 	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
77 	SIZE_CLASSES
78 #undef SC
79 };
80 
81 JEMALLOC_ALIGNED(CACHELINE)
82 const uint8_t	size2index_tab[] = {
83 #if LG_TINY_MIN == 0
84 #warning "Dangerous LG_TINY_MIN"
85 #define	S2B_0(i)	i,
86 #elif LG_TINY_MIN == 1
87 #warning "Dangerous LG_TINY_MIN"
88 #define	S2B_1(i)	i,
89 #elif LG_TINY_MIN == 2
90 #warning "Dangerous LG_TINY_MIN"
91 #define	S2B_2(i)	i,
92 #elif LG_TINY_MIN == 3
93 #define	S2B_3(i)	i,
94 #elif LG_TINY_MIN == 4
95 #define	S2B_4(i)	i,
96 #elif LG_TINY_MIN == 5
97 #define	S2B_5(i)	i,
98 #elif LG_TINY_MIN == 6
99 #define	S2B_6(i)	i,
100 #elif LG_TINY_MIN == 7
101 #define	S2B_7(i)	i,
102 #elif LG_TINY_MIN == 8
103 #define	S2B_8(i)	i,
104 #elif LG_TINY_MIN == 9
105 #define	S2B_9(i)	i,
106 #elif LG_TINY_MIN == 10
107 #define	S2B_10(i)	i,
108 #elif LG_TINY_MIN == 11
109 #define	S2B_11(i)	i,
110 #else
111 #error "Unsupported LG_TINY_MIN"
112 #endif
113 #if LG_TINY_MIN < 1
114 #define	S2B_1(i)	S2B_0(i) S2B_0(i)
115 #endif
116 #if LG_TINY_MIN < 2
117 #define	S2B_2(i)	S2B_1(i) S2B_1(i)
118 #endif
119 #if LG_TINY_MIN < 3
120 #define	S2B_3(i)	S2B_2(i) S2B_2(i)
121 #endif
122 #if LG_TINY_MIN < 4
123 #define	S2B_4(i)	S2B_3(i) S2B_3(i)
124 #endif
125 #if LG_TINY_MIN < 5
126 #define	S2B_5(i)	S2B_4(i) S2B_4(i)
127 #endif
128 #if LG_TINY_MIN < 6
129 #define	S2B_6(i)	S2B_5(i) S2B_5(i)
130 #endif
131 #if LG_TINY_MIN < 7
132 #define	S2B_7(i)	S2B_6(i) S2B_6(i)
133 #endif
134 #if LG_TINY_MIN < 8
135 #define	S2B_8(i)	S2B_7(i) S2B_7(i)
136 #endif
137 #if LG_TINY_MIN < 9
138 #define	S2B_9(i)	S2B_8(i) S2B_8(i)
139 #endif
140 #if LG_TINY_MIN < 10
141 #define	S2B_10(i)	S2B_9(i) S2B_9(i)
142 #endif
143 #if LG_TINY_MIN < 11
144 #define	S2B_11(i)	S2B_10(i) S2B_10(i)
145 #endif
146 #define	S2B_no(i)
147 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
148 	S2B_##lg_delta_lookup(index)
149 	SIZE_CLASSES
150 #undef S2B_3
151 #undef S2B_4
152 #undef S2B_5
153 #undef S2B_6
154 #undef S2B_7
155 #undef S2B_8
156 #undef S2B_9
157 #undef S2B_10
158 #undef S2B_11
159 #undef S2B_no
160 #undef SC
161 };
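/*
 * Illustrative sketch (not spelled out in the sources above): each SC() entry
 * whose lg_delta_lookup is numeric expands, via the S2B_* macros, to
 * 2^(lg_delta_lookup - LG_TINY_MIN) copies of its size class index, while
 * entries with lg_delta_lookup == "no" contribute nothing.  With
 * LG_TINY_MIN == 3, a class with lg_delta_lookup == 3 fills one slot and one
 * with lg_delta_lookup == 4 fills two.  The lookup helper (size2index_lookup()
 * in jemalloc_internal.h, assuming the 4.x layout) can then resolve a small
 * request with a single read:
 *
 *	index = size2index_tab[(size - 1) >> LG_TINY_MIN];
 */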
162 
163 #ifdef JEMALLOC_THREADED_INIT
164 /* Used to let the initializing thread recursively allocate. */
165 #  define NO_INITIALIZER	((unsigned long)0)
166 #  define INITIALIZER		pthread_self()
167 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
168 static pthread_t		malloc_initializer = NO_INITIALIZER;
169 #else
170 #  define NO_INITIALIZER	false
171 #  define INITIALIZER		true
172 #  define IS_INITIALIZER	malloc_initializer
173 static bool			malloc_initializer = NO_INITIALIZER;
174 #endif
175 
176 /* Used to avoid initialization races. */
177 #ifdef _WIN32
178 #if _WIN32_WINNT >= 0x0600
179 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
180 #else
181 static malloc_mutex_t	init_lock;
182 static bool init_lock_initialized = false;
183 
184 JEMALLOC_ATTR(constructor)

185 static void WINAPI
186 _init_init_lock(void)
187 {
188 
189 	/* If another constructor in the same binary is using mallctl to
190 	 * e.g. setup chunk hooks, it may end up running before this one,
191 	 * and malloc_init_hard will crash trying to lock the uninitialized
192 	 * lock. So we force an initialization of the lock in
193 	 * malloc_init_hard as well. We don't try to care about atomicity
194 	 * of the accesses to the init_lock_initialized boolean, since it
195 	 * really only matters early in the process creation, before any
196 	 * separate thread normally starts doing anything. */
197 	if (!init_lock_initialized)
198 		malloc_mutex_init(&init_lock);
199 	init_lock_initialized = true;
200 }
201 
202 #ifdef _MSC_VER
203 #  pragma section(".CRT$XCU", read)
204 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
205 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
206 #endif
207 #endif
208 #else
209 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
210 #endif
211 
212 typedef struct {
213 	void	*p;	/* Input pointer (as in realloc(p, s)). */
214 	size_t	s;	/* Request size. */
215 	void	*r;	/* Result pointer. */
216 } malloc_utrace_t;
217 
218 #ifdef JEMALLOC_UTRACE
219 #  define UTRACE(a, b, c) do {						\
220 	if (unlikely(opt_utrace)) {					\
221 		int utrace_serrno = errno;				\
222 		malloc_utrace_t ut;					\
223 		ut.p = (a);						\
224 		ut.s = (b);						\
225 		ut.r = (c);						\
226 		utrace(&ut, sizeof(ut));				\
227 		errno = utrace_serrno;					\
228 	}								\
229 } while (0)
230 #else
231 #  define UTRACE(a, b, c)
232 #endif
233 
234 /******************************************************************************/
235 /*
236  * Function prototypes for static functions that are referenced prior to
237  * definition.
238  */
239 
240 static bool	malloc_init_hard_a0(void);
241 static bool	malloc_init_hard(void);
242 
243 /******************************************************************************/
244 /*
245  * Begin miscellaneous support functions.
246  */
247 
248 JEMALLOC_ALWAYS_INLINE_C bool
249 malloc_initialized(void)
250 {
251 
252 	return (malloc_init_state == malloc_init_initialized);
253 }
254 
255 JEMALLOC_ALWAYS_INLINE_C void
256 malloc_thread_init(void)
257 {
258 
259 	/*
260 	 * TSD initialization can't be safely done as a side effect of
261 	 * deallocation, because it is possible for a thread to do nothing but
262 	 * deallocate its TLS data via free(), in which case writing to TLS
263 	 * would cause write-after-free memory corruption.  The quarantine
264 	 * facility *only* gets used as a side effect of deallocation, so make
265 	 * a best effort attempt at initializing its TSD by hooking all
266 	 * allocation events.
267 	 */
268 	if (config_fill && unlikely(opt_quarantine))
269 		quarantine_alloc_hook();
270 }
271 
272 JEMALLOC_ALWAYS_INLINE_C bool
273 malloc_init_a0(void)
274 {
275 
276 	if (unlikely(malloc_init_state == malloc_init_uninitialized))
277 		return (malloc_init_hard_a0());
278 	return (false);
279 }
280 
281 JEMALLOC_ALWAYS_INLINE_C bool
282 malloc_init(void)
283 {
284 
285 	if (unlikely(!malloc_initialized()) && malloc_init_hard())
286 		return (true);
287 	malloc_thread_init();
288 
289 	return (false);
290 }
291 
292 /*
293  * The a0*() functions are used instead of i[mcd]alloc() in situations that
294  * cannot tolerate TLS variable access.
295  */
296 
297 arena_t *
298 a0get(void)
299 {
300 
301 	assert(a0 != NULL);
302 	return (a0);
303 }
304 
305 static void *
306 a0ialloc(size_t size, bool zero, bool is_metadata)
307 {
308 
309 	if (unlikely(malloc_init_a0()))
310 		return (NULL);
311 
312 	return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
313 }
314 
315 static void
316 a0idalloc(void *ptr, bool is_metadata)
317 {
318 
319 	idalloctm(NULL, ptr, false, is_metadata);
320 }
321 
322 void *
323 a0malloc(size_t size)
324 {
325 
326 	return (a0ialloc(size, false, true));
327 }
328 
329 void
330 a0dalloc(void *ptr)
331 {
332 
333 	a0idalloc(ptr, true);
334 }
335 
336 /*
337  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
338  * situations that cannot tolerate TLS variable access (TLS allocation and very
339  * early internal data structure initialization).
340  */
341 
342 void *
343 bootstrap_malloc(size_t size)
344 {
345 
346 	if (unlikely(size == 0))
347 		size = 1;
348 
349 	return (a0ialloc(size, false, false));
350 }
351 
352 void *
353 bootstrap_calloc(size_t num, size_t size)
354 {
355 	size_t num_size;
356 
357 	num_size = num * size;
358 	if (unlikely(num_size == 0)) {
359 		assert(num == 0 || size == 0);
360 		num_size = 1;
361 	}
362 
363 	return (a0ialloc(num_size, true, false));
364 }
365 
366 void
367 bootstrap_free(void *ptr)
368 {
369 
370 	if (unlikely(ptr == NULL))
371 		return;
372 
373 	a0idalloc(ptr, false);
374 }
375 
376 /* Create a new arena and insert it into the arenas array at index ind. */
377 static arena_t *
378 arena_init_locked(unsigned ind)
379 {
380 	arena_t *arena;
381 
382 	/* Expand arenas if necessary. */
383 	assert(ind <= narenas_total);
384 	if (ind > MALLOCX_ARENA_MAX)
385 		return (NULL);
386 	if (ind == narenas_total) {
387 		unsigned narenas_new = narenas_total + 1;
388 		arena_t **arenas_new =
389 		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
390 		    sizeof(arena_t *)));
391 		if (arenas_new == NULL)
392 			return (NULL);
393 		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
394 		arenas_new[ind] = NULL;
395 		/*
396 		 * Deallocate only if arenas came from a0malloc() (not
397 		 * base_alloc()).
398 		 */
399 		if (narenas_total != narenas_auto)
400 			a0dalloc(arenas);
401 		arenas = arenas_new;
402 		narenas_total = narenas_new;
403 	}
404 
405 	/*
406 	 * Another thread may have already initialized arenas[ind] if it's an
407 	 * auto arena.
408 	 */
409 	arena = arenas[ind];
410 	if (arena != NULL) {
411 		assert(ind < narenas_auto);
412 		return (arena);
413 	}
414 
415 	/* Actually initialize the arena. */
416 	arena = arenas[ind] = arena_new(ind);
417 	return (arena);
418 }
419 
420 arena_t *
421 arena_init(unsigned ind)
422 {
423 	arena_t *arena;
424 
425 	malloc_mutex_lock(&arenas_lock);
426 	arena = arena_init_locked(ind);
427 	malloc_mutex_unlock(&arenas_lock);
428 	return (arena);
429 }
430 
431 unsigned
432 narenas_total_get(void)
433 {
434 	unsigned narenas;
435 
436 	malloc_mutex_lock(&arenas_lock);
437 	narenas = narenas_total;
438 	malloc_mutex_unlock(&arenas_lock);
439 
440 	return (narenas);
441 }
442 
443 static void
444 arena_bind_locked(tsd_t *tsd, unsigned ind)
445 {
446 	arena_t *arena;
447 
448 	arena = arenas[ind];
449 	arena->nthreads++;
450 
451 	if (tsd_nominal(tsd))
452 		tsd_arena_set(tsd, arena);
453 }
454 
455 static void
456 arena_bind(tsd_t *tsd, unsigned ind)
457 {
458 
459 	malloc_mutex_lock(&arenas_lock);
460 	arena_bind_locked(tsd, ind);
461 	malloc_mutex_unlock(&arenas_lock);
462 }
463 
464 void
465 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
466 {
467 	arena_t *oldarena, *newarena;
468 
469 	malloc_mutex_lock(&arenas_lock);
470 	oldarena = arenas[oldind];
471 	newarena = arenas[newind];
472 	oldarena->nthreads--;
473 	newarena->nthreads++;
474 	malloc_mutex_unlock(&arenas_lock);
475 	tsd_arena_set(tsd, newarena);
476 }
477 
478 unsigned
479 arena_nbound(unsigned ind)
480 {
481 	unsigned nthreads;
482 
483 	malloc_mutex_lock(&arenas_lock);
484 	nthreads = arenas[ind]->nthreads;
485 	malloc_mutex_unlock(&arenas_lock);
486 	return (nthreads);
487 }
488 
489 static void
490 arena_unbind(tsd_t *tsd, unsigned ind)
491 {
492 	arena_t *arena;
493 
494 	malloc_mutex_lock(&arenas_lock);
495 	arena = arenas[ind];
496 	arena->nthreads--;
497 	malloc_mutex_unlock(&arenas_lock);
498 	tsd_arena_set(tsd, NULL);
499 }
500 
501 arena_t *
502 arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
503 {
504 	arena_t *arena;
505 	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
506 	unsigned narenas_cache = tsd_narenas_cache_get(tsd);
507 	unsigned narenas_actual = narenas_total_get();
508 
509 	/* Deallocate old cache if it's too small. */
510 	if (arenas_cache != NULL && narenas_cache < narenas_actual) {
511 		a0dalloc(arenas_cache);
512 		arenas_cache = NULL;
513 		narenas_cache = 0;
514 		tsd_arenas_cache_set(tsd, arenas_cache);
515 		tsd_narenas_cache_set(tsd, narenas_cache);
516 	}
517 
518 	/* Allocate cache if it's missing. */
519 	if (arenas_cache == NULL) {
520 		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
521 		assert(ind < narenas_actual || !init_if_missing);
522 		narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
523 
524 		if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
525 			*arenas_cache_bypassp = true;
526 			arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
527 			    narenas_cache);
528 			*arenas_cache_bypassp = false;
529 		}
530 		if (arenas_cache == NULL) {
531 			/*
532 			 * This function must always tell the truth, even if
533 			 * it's slow, so don't let OOM, thread cleanup (note
534 			 * tsd_nominal check), nor recursive allocation
535 			 * avoidance (note arenas_cache_bypass check) get in the
536 			 * way.
537 			 */
538 			if (ind >= narenas_actual)
539 				return (NULL);
540 			malloc_mutex_lock(&arenas_lock);
541 			arena = arenas[ind];
542 			malloc_mutex_unlock(&arenas_lock);
543 			return (arena);
544 		}
545 		assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
546 		tsd_arenas_cache_set(tsd, arenas_cache);
547 		tsd_narenas_cache_set(tsd, narenas_cache);
548 	}
549 
550 	/*
551 	 * Copy to cache.  It's possible that the actual number of arenas has
552 	 * increased since narenas_total_get() was called above, but that causes
553 	 * no correctness issues unless two threads concurrently execute the
554 	 * arenas.extend mallctl, which we trust mallctl synchronization to
555 	 * prevent.
556 	 */
557 	malloc_mutex_lock(&arenas_lock);
558 	memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
559 	malloc_mutex_unlock(&arenas_lock);
560 	if (narenas_cache > narenas_actual) {
561 		memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
562 		    (narenas_cache - narenas_actual));
563 	}
564 
565 	/* Read the refreshed cache, and init the arena if necessary. */
566 	arena = arenas_cache[ind];
567 	if (init_if_missing && arena == NULL)
568 		arena = arenas_cache[ind] = arena_init(ind);
569 	return (arena);
570 }
571 
572 /* Slow path, called only by arena_choose(). */
573 arena_t *
574 arena_choose_hard(tsd_t *tsd)
575 {
576 	arena_t *ret;
577 
578 	if (narenas_auto > 1) {
579 		unsigned i, choose, first_null;
580 
581 		choose = 0;
582 		first_null = narenas_auto;
583 		malloc_mutex_lock(&arenas_lock);
584 		assert(a0get() != NULL);
585 		for (i = 1; i < narenas_auto; i++) {
586 			if (arenas[i] != NULL) {
587 				/*
588 				 * Choose the first arena that has the lowest
589 				 * number of threads assigned to it.
590 				 */
591 				if (arenas[i]->nthreads <
592 				    arenas[choose]->nthreads)
593 					choose = i;
594 			} else if (first_null == narenas_auto) {
595 				/*
596 				 * Record the index of the first uninitialized
597 				 * arena, in case all extant arenas are in use.
598 				 *
599 				 * NB: It is possible for there to be
600 				 * discontinuities in terms of initialized
601 				 * versus uninitialized arenas, due to the
602 				 * "thread.arena" mallctl.
603 				 */
604 				first_null = i;
605 			}
606 		}
607 
608 		if (arenas[choose]->nthreads == 0
609 		    || first_null == narenas_auto) {
610 			/*
611 			 * Use an unloaded arena, or the least loaded arena if
612 			 * all arenas are already initialized.
613 			 */
614 			ret = arenas[choose];
615 		} else {
616 			/* Initialize a new arena. */
617 			choose = first_null;
618 			ret = arena_init_locked(choose);
619 			if (ret == NULL) {
620 				malloc_mutex_unlock(&arenas_lock);
621 				return (NULL);
622 			}
623 		}
624 		arena_bind_locked(tsd, choose);
625 		malloc_mutex_unlock(&arenas_lock);
626 	} else {
627 		ret = a0get();
628 		arena_bind(tsd, 0);
629 	}
630 
631 	return (ret);
632 }
633 
634 void
635 thread_allocated_cleanup(tsd_t *tsd)
636 {
637 
638 	/* Do nothing. */
639 }
640 
641 void
642 thread_deallocated_cleanup(tsd_t *tsd)
643 {
644 
645 	/* Do nothing. */
646 }
647 
648 void
649 arena_cleanup(tsd_t *tsd)
650 {
651 	arena_t *arena;
652 
653 	arena = tsd_arena_get(tsd);
654 	if (arena != NULL)
655 		arena_unbind(tsd, arena->ind);
656 }
657 
658 void
659 arenas_cache_cleanup(tsd_t *tsd)
660 {
661 	arena_t **arenas_cache;
662 
663 	arenas_cache = tsd_arenas_cache_get(tsd);
664 	if (arenas_cache != NULL) {
665 		tsd_arenas_cache_set(tsd, NULL);
666 		a0dalloc(arenas_cache);
667 	}
668 }
669 
670 void
671 narenas_cache_cleanup(tsd_t *tsd)
672 {
673 
674 	/* Do nothing. */
675 }
676 
677 void
678 arenas_cache_bypass_cleanup(tsd_t *tsd)
679 {
680 
681 	/* Do nothing. */
682 }
683 
684 static void
685 stats_print_atexit(void)
686 {
687 
688 	if (config_tcache && config_stats) {
689 		unsigned narenas, i;
690 
691 		/*
692 		 * Merge stats from extant threads.  This is racy, since
693 		 * individual threads do not lock when recording tcache stats
694 		 * events.  As a consequence, the final stats may be slightly
695 		 * out of date by the time they are reported, if other threads
696 		 * continue to allocate.
697 		 */
698 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
699 			arena_t *arena = arenas[i];
700 			if (arena != NULL) {
701 				tcache_t *tcache;
702 
703 				/*
704 				 * tcache_stats_merge() locks bins, so if any
705 				 * code is introduced that acquires both arena
706 				 * and bin locks in the opposite order,
707 				 * deadlocks may result.
708 				 */
709 				malloc_mutex_lock(&arena->lock);
710 				ql_foreach(tcache, &arena->tcache_ql, link) {
711 					tcache_stats_merge(tcache, arena);
712 				}
713 				malloc_mutex_unlock(&arena->lock);
714 			}
715 		}
716 	}
717 	je_malloc_stats_print(NULL, NULL, NULL);
718 }
719 
720 /*
721  * End miscellaneous support functions.
722  */
723 /******************************************************************************/
724 /*
725  * Begin initialization functions.
726  */
727 
728 #ifndef JEMALLOC_HAVE_SECURE_GETENV
729 static char *
730 secure_getenv(const char *name)
731 {
732 
733 #  ifdef JEMALLOC_HAVE_ISSETUGID
734 	if (issetugid() != 0)
735 		return (NULL);
736 #  endif
737 	return (getenv(name));
738 }
739 #endif
740 
741 static unsigned
742 malloc_ncpus(void)
743 {
744 	long result;
745 
746 #ifdef _WIN32
747 	SYSTEM_INFO si;
748 	GetSystemInfo(&si);
749 	result = si.dwNumberOfProcessors;
750 #else
751 	result = sysconf(_SC_NPROCESSORS_ONLN);
752 #endif
753 	return ((result == -1) ? 1 : (unsigned)result);
754 }
755 
756 static bool
757 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
758     char const **v_p, size_t *vlen_p)
759 {
760 	bool accept;
761 	const char *opts = *opts_p;
762 
763 	*k_p = opts;
764 
765 	for (accept = false; !accept;) {
766 		switch (*opts) {
767 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
768 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
769 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
770 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
771 		case 'Y': case 'Z':
772 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
773 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
774 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
775 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
776 		case 'y': case 'z':
777 		case '0': case '1': case '2': case '3': case '4': case '5':
778 		case '6': case '7': case '8': case '9':
779 		case '_':
780 			opts++;
781 			break;
782 		case ':':
783 			opts++;
784 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
785 			*v_p = opts;
786 			accept = true;
787 			break;
788 		case '\0':
789 			if (opts != *opts_p) {
790 				malloc_write("<jemalloc>: Conf string ends "
791 				    "with key\n");
792 			}
793 			return (true);
794 		default:
795 			malloc_write("<jemalloc>: Malformed conf string\n");
796 			return (true);
797 		}
798 	}
799 
800 	for (accept = false; !accept;) {
801 		switch (*opts) {
802 		case ',':
803 			opts++;
804 			/*
805 			 * Look ahead one character here, because the next time
806 			 * this function is called, it will assume that end of
807 			 * input has been cleanly reached if no input remains,
808 			 * but we have optimistically already consumed the
809 			 * comma if one exists.
810 			 */
811 			if (*opts == '\0') {
812 				malloc_write("<jemalloc>: Conf string ends "
813 				    "with comma\n");
814 			}
815 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
816 			accept = true;
817 			break;
818 		case '\0':
819 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
820 			accept = true;
821 			break;
822 		default:
823 			opts++;
824 			break;
825 		}
826 	}
827 
828 	*opts_p = opts;
829 	return (false);
830 }
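/*
 * Worked example (illustrative string, not taken from the sources above):
 * with *opts_p pointing at "abort:true,narenas:4", the first call returns
 * false and yields k/klen == "abort"/5 and v/vlen == "true"/4, leaving
 * *opts_p just past the comma; the second call yields "narenas"/7 and "4"/1
 * with *opts_p at the terminating '\0'; a further call would return true
 * (end of input, no pair produced).
 */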
831 
832 static void
833 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
834     size_t vlen)
835 {
836 
837 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
838 	    (int)vlen, v);
839 }
840 
841 static void
842 malloc_conf_init(void)
843 {
844 	unsigned i;
845 	char buf[PATH_MAX + 1];
846 	const char *opts, *k, *v;
847 	size_t klen, vlen;
848 
849 	/*
850 	 * Automatically configure valgrind before processing options.  The
851 	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
852 	 */
853 	if (config_valgrind) {
854 		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
855 		if (config_fill && unlikely(in_valgrind)) {
856 			opt_junk = "false";
857 			opt_junk_alloc = false;
858 			opt_junk_free = false;
859 			assert(!opt_zero);
860 			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
861 			opt_redzone = true;
862 		}
863 		if (config_tcache && unlikely(in_valgrind))
864 			opt_tcache = false;
865 	}
866 
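	/*
	 * A configuration string is looked for in three places, in order:
	 * the compiled-in je_malloc_conf symbol, the name that the
	 * /etc/malloc.conf symlink points at, and the MALLOC_CONF (or
	 * prefixed) environment variable.  For example, assuming a build
	 * without JEMALLOC_PREFIX, launching an application as
	 *
	 *	MALLOC_CONF="narenas:2,lg_chunk:22" ./app
	 *
	 * would be parsed by the loop below into the pairs narenas=2 and
	 * lg_chunk=22 (an illustrative invocation, not taken from the
	 * sources above).
	 */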
867 	for (i = 0; i < 3; i++) {
868 		/* Get runtime configuration. */
869 		switch (i) {
870 		case 0:
871 			if (je_malloc_conf != NULL) {
872 				/*
873 				 * Use options that were compiled into the
874 				 * program.
875 				 */
876 				opts = je_malloc_conf;
877 			} else {
878 				/* No configuration specified. */
879 				buf[0] = '\0';
880 				opts = buf;
881 			}
882 			break;
883 		case 1: {
884 			int linklen = 0;
885 #ifndef _WIN32
886 			int saved_errno = errno;
887 			const char *linkname =
888 #  ifdef JEMALLOC_PREFIX
889 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
890 #  else
891 			    "/etc/malloc.conf"
892 #  endif
893 			    ;
894 
895 			/*
896 			 * Try to use the contents of the "/etc/malloc.conf"
897 			 * symbolic link's name.
898 			 */
899 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
900 			if (linklen == -1) {
901 				/* No configuration specified. */
902 				linklen = 0;
903 				/* Restore errno. */
904 				set_errno(saved_errno);
905 			}
906 #endif
907 			buf[linklen] = '\0';
908 			opts = buf;
909 			break;
910 		} case 2: {
911 			const char *envname =
912 #ifdef JEMALLOC_PREFIX
913 			    JEMALLOC_CPREFIX"MALLOC_CONF"
914 #else
915 			    "MALLOC_CONF"
916 #endif
917 			    ;
918 
919 			if ((opts = secure_getenv(envname)) != NULL) {
920 				/*
921 				 * Do nothing; opts is already initialized to
922 				 * the value of the MALLOC_CONF environment
923 				 * variable.
924 				 */
925 			} else {
926 				/* No configuration specified. */
927 				buf[0] = '\0';
928 				opts = buf;
929 			}
930 			break;
931 		} default:
932 			not_reached();
933 			buf[0] = '\0';
934 			opts = buf;
935 		}
936 
937 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
938 		    &vlen)) {
939 #define	CONF_MATCH(n)							\
940 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
941 #define	CONF_MATCH_VALUE(n)						\
942 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
943 #define	CONF_HANDLE_BOOL(o, n, cont)					\
944 			if (CONF_MATCH(n)) {				\
945 				if (CONF_MATCH_VALUE("true"))		\
946 					o = true;			\
947 				else if (CONF_MATCH_VALUE("false"))	\
948 					o = false;			\
949 				else {					\
950 					malloc_conf_error(		\
951 					    "Invalid conf value",	\
952 					    k, klen, v, vlen);		\
953 				}					\
954 				if (cont)				\
955 					continue;			\
956 			}
957 #define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
958 			if (CONF_MATCH(n)) {				\
959 				uintmax_t um;				\
960 				char *end;				\
961 									\
962 				set_errno(0);				\
963 				um = malloc_strtoumax(v, &end, 0);	\
964 				if (get_errno() != 0 || (uintptr_t)end -\
965 				    (uintptr_t)v != vlen) {		\
966 					malloc_conf_error(		\
967 					    "Invalid conf value",	\
968 					    k, klen, v, vlen);		\
969 				} else if (clip) {			\
970 					if ((min) != 0 && um < (min))	\
971 						o = (min);		\
972 					else if (um > (max))		\
973 						o = (max);		\
974 					else				\
975 						o = um;			\
976 				} else {				\
977 					if (((min) != 0 && um < (min))	\
978 					    || um > (max)) {		\
979 						malloc_conf_error(	\
980 						    "Out-of-range "	\
981 						    "conf value",	\
982 						    k, klen, v, vlen);	\
983 					} else				\
984 						o = um;			\
985 				}					\
986 				continue;				\
987 			}
988 #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
989 			if (CONF_MATCH(n)) {				\
990 				long l;					\
991 				char *end;				\
992 									\
993 				set_errno(0);				\
994 				l = strtol(v, &end, 0);			\
995 				if (get_errno() != 0 || (uintptr_t)end -\
996 				    (uintptr_t)v != vlen) {		\
997 					malloc_conf_error(		\
998 					    "Invalid conf value",	\
999 					    k, klen, v, vlen);		\
1000 				} else if (l < (ssize_t)(min) || l >	\
1001 				    (ssize_t)(max)) {			\
1002 					malloc_conf_error(		\
1003 					    "Out-of-range conf value",	\
1004 					    k, klen, v, vlen);		\
1005 				} else					\
1006 					o = l;				\
1007 				continue;				\
1008 			}
1009 #define	CONF_HANDLE_CHAR_P(o, n, d)					\
1010 			if (CONF_MATCH(n)) {				\
1011 				size_t cpylen = (vlen <=		\
1012 				    sizeof(o)-1) ? vlen :		\
1013 				    sizeof(o)-1;			\
1014 				strncpy(o, v, cpylen);			\
1015 				o[cpylen] = '\0';			\
1016 				continue;				\
1017 			}
1018 
1019 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
1020 			/*
1021 			 * Chunks always require at least one header page,
1022 			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1023 			 * possibly an additional page in the presence of
1024 			 * redzones.  In order to simplify options processing,
1025 			 * use a conservative bound that accommodates all these
1026 			 * constraints.
1027 			 */
1028 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1029 			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1030 			    (sizeof(size_t) << 3) - 1, true)
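			/*
			 * E.g. with 4 KiB pages (LG_PAGE == 12),
			 * LG_SIZE_CLASS_GROUP == 2, and fill support compiled
			 * in, the bound above works out to a minimum lg_chunk
			 * of 12 + 2 + 2 == 16, i.e. chunks of at least 64 KiB
			 * (illustrative figures; the real values come from the
			 * build configuration).
			 */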
1031 			if (strncmp("dss", k, klen) == 0) {
1032 				int i;
1033 				bool match = false;
1034 				for (i = 0; i < dss_prec_limit; i++) {
1035 					if (strncmp(dss_prec_names[i], v, vlen)
1036 					    == 0) {
1037 						if (chunk_dss_prec_set(i)) {
1038 							malloc_conf_error(
1039 							    "Error setting dss",
1040 							    k, klen, v, vlen);
1041 						} else {
1042 							opt_dss =
1043 							    dss_prec_names[i];
1044 							match = true;
1045 							break;
1046 						}
1047 					}
1048 				}
1049 				if (!match) {
1050 					malloc_conf_error("Invalid conf value",
1051 					    k, klen, v, vlen);
1052 				}
1053 				continue;
1054 			}
1055 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
1056 			    SIZE_T_MAX, false)
1057 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1058 			    -1, (sizeof(size_t) << 3) - 1)
1059 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1060 			if (config_fill) {
1061 				if (CONF_MATCH("junk")) {
1062 					if (CONF_MATCH_VALUE("true")) {
1063 						opt_junk = "true";
1064 						opt_junk_alloc = opt_junk_free =
1065 						    true;
1066 					} else if (CONF_MATCH_VALUE("false")) {
1067 						opt_junk = "false";
1068 						opt_junk_alloc = opt_junk_free =
1069 						    false;
1070 					} else if (CONF_MATCH_VALUE("alloc")) {
1071 						opt_junk = "alloc";
1072 						opt_junk_alloc = true;
1073 						opt_junk_free = false;
1074 					} else if (CONF_MATCH_VALUE("free")) {
1075 						opt_junk = "free";
1076 						opt_junk_alloc = false;
1077 						opt_junk_free = true;
1078 					} else {
1079 						malloc_conf_error(
1080 						    "Invalid conf value", k,
1081 						    klen, v, vlen);
1082 					}
1083 					continue;
1084 				}
1085 				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1086 				    0, SIZE_T_MAX, false)
1087 				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1088 				CONF_HANDLE_BOOL(opt_zero, "zero", true)
1089 			}
1090 			if (config_utrace) {
1091 				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1092 			}
1093 			if (config_xmalloc) {
1094 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1095 			}
1096 			if (config_tcache) {
1097 				CONF_HANDLE_BOOL(opt_tcache, "tcache",
1098 				    !config_valgrind || !in_valgrind)
1099 				if (CONF_MATCH("tcache")) {
1100 					assert(config_valgrind && in_valgrind);
1101 					if (opt_tcache) {
1102 						opt_tcache = false;
1103 						malloc_conf_error(
1104 						"tcache cannot be enabled "
1105 						"while running inside Valgrind",
1106 						k, klen, v, vlen);
1107 					}
1108 					continue;
1109 				}
1110 				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1111 				    "lg_tcache_max", -1,
1112 				    (sizeof(size_t) << 3) - 1)
1113 			}
1114 			if (config_prof) {
1115 				CONF_HANDLE_BOOL(opt_prof, "prof", true)
1116 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1117 				    "prof_prefix", "jeprof")
1118 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1119 				    true)
1120 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1121 				    "prof_thread_active_init", true)
1122 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1123 				    "lg_prof_sample", 0,
1124 				    (sizeof(uint64_t) << 3) - 1, true)
1125 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1126 				    true)
1127 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1128 				    "lg_prof_interval", -1,
1129 				    (sizeof(uint64_t) << 3) - 1)
1130 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1131 				    true)
1132 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1133 				    true)
1134 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1135 				    true)
1136 			}
1137 			malloc_conf_error("Invalid conf pair", k, klen, v,
1138 			    vlen);
1139 #undef CONF_MATCH
1140 #undef CONF_HANDLE_BOOL
1141 #undef CONF_HANDLE_SIZE_T
1142 #undef CONF_HANDLE_SSIZE_T
1143 #undef CONF_HANDLE_CHAR_P
1144 		}
1145 	}
1146 }
1147 
1148 /* init_lock must be held. */
1149 static bool
1150 malloc_init_hard_needed(void)
1151 {
1152 
1153 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1154 	    malloc_init_recursible)) {
1155 		/*
1156 		 * Another thread initialized the allocator before this one
1157 		 * acquired init_lock, or this thread is the initializing
1158 		 * thread, and it is recursively allocating.
1159 		 */
1160 		return (false);
1161 	}
1162 #ifdef JEMALLOC_THREADED_INIT
1163 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1164 		/* Busy-wait until the initializing thread completes. */
1165 		do {
1166 			malloc_mutex_unlock(&init_lock);
1167 			CPU_SPINWAIT;
1168 			malloc_mutex_lock(&init_lock);
1169 		} while (!malloc_initialized());
1170 		return (false);
1171 	}
1172 #endif
1173 	return (true);
1174 }
1175 
1176 /* init_lock must be held. */
1177 static bool
1178 malloc_init_hard_a0_locked(void)
1179 {
1180 
1181 	malloc_initializer = INITIALIZER;
1182 
1183 	if (config_prof)
1184 		prof_boot0();
1185 	malloc_conf_init();
1186 	if (opt_stats_print) {
1187 		/* Print statistics at exit. */
1188 		if (atexit(stats_print_atexit) != 0) {
1189 			malloc_write("<jemalloc>: Error in atexit()\n");
1190 			if (opt_abort)
1191 				abort();
1192 		}
1193 	}
1194 	if (base_boot())
1195 		return (true);
1196 	if (chunk_boot())
1197 		return (true);
1198 	if (ctl_boot())
1199 		return (true);
1200 	if (config_prof)
1201 		prof_boot1();
1202 	if (arena_boot())
1203 		return (true);
1204 	if (config_tcache && tcache_boot())
1205 		return (true);
1206 	if (malloc_mutex_init(&arenas_lock))
1207 		return (true);
1208 	/*
1209 	 * Create enough scaffolding to allow recursive allocation in
1210 	 * malloc_ncpus().
1211 	 */
1212 	narenas_total = narenas_auto = 1;
1213 	arenas = &a0;
1214 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1215 	/*
1216 	 * Initialize one arena here.  The rest are lazily created in
1217 	 * arena_choose_hard().
1218 	 */
1219 	if (arena_init(0) == NULL)
1220 		return (true);
1221 	malloc_init_state = malloc_init_a0_initialized;
1222 	return (false);
1223 }
1224 
1225 static bool
1226 malloc_init_hard_a0(void)
1227 {
1228 	bool ret;
1229 
1230 	malloc_mutex_lock(&init_lock);
1231 	ret = malloc_init_hard_a0_locked();
1232 	malloc_mutex_unlock(&init_lock);
1233 	return (ret);
1234 }
1235 
1236 /*
1237  * Initialize data structures which may trigger recursive allocation.
1238  *
1239  * init_lock must be held.
1240  */
1241 static void
1242 malloc_init_hard_recursible(void)
1243 {
1244 
1245 	malloc_init_state = malloc_init_recursible;
1246 	malloc_mutex_unlock(&init_lock);
1247 
1248 	ncpus = malloc_ncpus();
1249 
1250 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1251     && !defined(_WIN32) && !defined(__native_client__))
1252 	/* LinuxThreads's pthread_atfork() allocates. */
1253 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1254 	    jemalloc_postfork_child) != 0) {
1255 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1256 		if (opt_abort)
1257 			abort();
1258 	}
1259 #endif
1260 	malloc_mutex_lock(&init_lock);
1261 }
1262 
1263 /* init_lock must be held. */
1264 static bool
1265 malloc_init_hard_finish(void)
1266 {
1267 
1268 	if (mutex_boot())
1269 		return (true);
1270 
1271 	if (opt_narenas == 0) {
1272 		/*
1273 		 * For SMP systems, create more than one arena per CPU by
1274 		 * default.
1275 		 */
1276 		if (ncpus > 1)
1277 			opt_narenas = ncpus << 2;
1278 		else
1279 			opt_narenas = 1;
1280 	}
1281 	narenas_auto = opt_narenas;
1282 	/*
1283 	 * Make sure that the arenas array can be allocated.  In practice, this
1284 	 * limit is enough to allow the allocator to function, but the ctl
1285 	 * machinery will fail to allocate memory at far lower limits.
1286 	 */
1287 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
1288 		narenas_auto = chunksize / sizeof(arena_t *);
1289 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1290 		    narenas_auto);
1291 	}
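	/*
	 * Illustrative bound: with the default 2 MiB chunks and 8-byte
	 * pointers this caps narenas_auto at 2^21 / 8 == 262144, far above
	 * the ncpus << 2 default computed above (figures assume a typical
	 * 64-bit build; the actual chunksize is configurable).
	 */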
1292 	narenas_total = narenas_auto;
1293 
1294 	/* Allocate and initialize arenas. */
1295 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
1296 	if (arenas == NULL)
1297 		return (true);
1298 	/*
1299 	 * Zero the array.  In practice, this should always be pre-zeroed,
1300 	 * since it was just mmap()ed, but let's be sure.
1301 	 */
1302 	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
1303 	/* Copy the pointer to the one arena that was already initialized. */
1304 	arenas[0] = a0;
1305 
1306 	malloc_init_state = malloc_init_initialized;
1307 	return (false);
1308 }
1309 
1310 static bool
1311 malloc_init_hard(void)
1312 {
1313 
1314 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1315 	_init_init_lock();
1316 #endif
1317 	malloc_mutex_lock(&init_lock);
1318 	if (!malloc_init_hard_needed()) {
1319 		malloc_mutex_unlock(&init_lock);
1320 		return (false);
1321 	}
1322 
1323 	if (malloc_init_state != malloc_init_a0_initialized &&
1324 	    malloc_init_hard_a0_locked()) {
1325 		malloc_mutex_unlock(&init_lock);
1326 		return (true);
1327 	}
1328 	if (malloc_tsd_boot0()) {
1329 		malloc_mutex_unlock(&init_lock);
1330 		return (true);
1331 	}
1332 	if (config_prof && prof_boot2()) {
1333 		malloc_mutex_unlock(&init_lock);
1334 		return (true);
1335 	}
1336 
1337 	malloc_init_hard_recursible();
1338 
1339 	if (malloc_init_hard_finish()) {
1340 		malloc_mutex_unlock(&init_lock);
1341 		return (true);
1342 	}
1343 
1344 	malloc_mutex_unlock(&init_lock);
1345 	malloc_tsd_boot1();
1346 	return (false);
1347 }
1348 
1349 /*
1350  * End initialization functions.
1351  */
1352 /******************************************************************************/
1353 /*
1354  * Begin malloc(3)-compatible functions.
1355  */
1356 
1357 static void *
1358 imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1359 {
1360 	void *p;
1361 
1362 	if (tctx == NULL)
1363 		return (NULL);
1364 	if (usize <= SMALL_MAXCLASS) {
1365 		p = imalloc(tsd, LARGE_MINCLASS);
1366 		if (p == NULL)
1367 			return (NULL);
1368 		arena_prof_promoted(p, usize);
1369 	} else
1370 		p = imalloc(tsd, usize);
1371 
1372 	return (p);
1373 }
1374 
1375 JEMALLOC_ALWAYS_INLINE_C void *
1376 imalloc_prof(tsd_t *tsd, size_t usize)
1377 {
1378 	void *p;
1379 	prof_tctx_t *tctx;
1380 
1381 	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1382 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1383 		p = imalloc_prof_sample(tsd, usize, tctx);
1384 	else
1385 		p = imalloc(tsd, usize);
1386 	if (unlikely(p == NULL)) {
1387 		prof_alloc_rollback(tsd, tctx, true);
1388 		return (NULL);
1389 	}
1390 	prof_malloc(p, usize, tctx);
1391 
1392 	return (p);
1393 }
1394 
1395 JEMALLOC_ALWAYS_INLINE_C void *
1396 imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
1397 {
1398 
1399 	if (unlikely(malloc_init()))
1400 		return (NULL);
1401 	*tsd = tsd_fetch();
1402 
1403 	if (config_prof && opt_prof) {
1404 		*usize = s2u(size);
1405 		if (unlikely(*usize == 0))
1406 			return (NULL);
1407 		return (imalloc_prof(*tsd, *usize));
1408 	}
1409 
1410 	if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1411 		*usize = s2u(size);
1412 	return (imalloc(*tsd, size));
1413 }
1414 
1415 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1416 void JEMALLOC_NOTHROW *
1417 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1418 je_malloc(size_t size)
1419 {
1420 	void *ret;
1421 	tsd_t *tsd;
1422 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1423 
1424 	if (size == 0)
1425 		size = 1;
1426 
1427 	ret = imalloc_body(size, &tsd, &usize);
1428 	if (unlikely(ret == NULL)) {
1429 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1430 			malloc_write("<jemalloc>: Error in malloc(): "
1431 			    "out of memory\n");
1432 			abort();
1433 		}
1434 		set_errno(ENOMEM);
1435 	}
1436 	if (config_stats && likely(ret != NULL)) {
1437 		assert(usize == isalloc(ret, config_prof));
1438 		*tsd_thread_allocatedp_get(tsd) += usize;
1439 	}
1440 	UTRACE(0, size, ret);
1441 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1442 	return (ret);
1443 }
1444 
1445 static void *
1446 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1447     prof_tctx_t *tctx)
1448 {
1449 	void *p;
1450 
1451 	if (tctx == NULL)
1452 		return (NULL);
1453 	if (usize <= SMALL_MAXCLASS) {
1454 		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1455 		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1456 		if (p == NULL)
1457 			return (NULL);
1458 		arena_prof_promoted(p, usize);
1459 	} else
1460 		p = ipalloc(tsd, usize, alignment, false);
1461 
1462 	return (p);
1463 }
1464 
1465 JEMALLOC_ALWAYS_INLINE_C void *
1466 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1467 {
1468 	void *p;
1469 	prof_tctx_t *tctx;
1470 
1471 	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1472 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1473 		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1474 	else
1475 		p = ipalloc(tsd, usize, alignment, false);
1476 	if (unlikely(p == NULL)) {
1477 		prof_alloc_rollback(tsd, tctx, true);
1478 		return (NULL);
1479 	}
1480 	prof_malloc(p, usize, tctx);
1481 
1482 	return (p);
1483 }
1484 
1485 JEMALLOC_ATTR(nonnull(1))
1486 static int
1487 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1488 {
1489 	int ret;
1490 	tsd_t *tsd;
1491 	size_t usize;
1492 	void *result;
1493 
1494 	assert(min_alignment != 0);
1495 
1496 	if (unlikely(malloc_init())) {
1497 		result = NULL;
1498 		goto label_oom;
1499 	}
1500 	tsd = tsd_fetch();
1501 	if (size == 0)
1502 		size = 1;
1503 
1504 	/* Make sure that alignment is a large enough power of 2. */
1505 	if (unlikely(((alignment - 1) & alignment) != 0
1506 	    || (alignment < min_alignment))) {
1507 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1508 			malloc_write("<jemalloc>: Error allocating "
1509 			    "aligned memory: invalid alignment\n");
1510 			abort();
1511 		}
1512 		result = NULL;
1513 		ret = EINVAL;
1514 		goto label_return;
1515 	}
1516 
1517 	usize = sa2u(size, alignment);
1518 	if (unlikely(usize == 0)) {
1519 		result = NULL;
1520 		goto label_oom;
1521 	}
1522 
1523 	if (config_prof && opt_prof)
1524 		result = imemalign_prof(tsd, alignment, usize);
1525 	else
1526 		result = ipalloc(tsd, usize, alignment, false);
1527 	if (unlikely(result == NULL))
1528 		goto label_oom;
1529 	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1530 
1531 	*memptr = result;
1532 	ret = 0;
1533 label_return:
1534 	if (config_stats && likely(result != NULL)) {
1535 		assert(usize == isalloc(result, config_prof));
1536 		*tsd_thread_allocatedp_get(tsd) += usize;
1537 	}
1538 	UTRACE(0, size, result);
1539 	return (ret);
1540 label_oom:
1541 	assert(result == NULL);
1542 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1543 		malloc_write("<jemalloc>: Error allocating aligned memory: "
1544 		    "out of memory\n");
1545 		abort();
1546 	}
1547 	ret = ENOMEM;
1548 	goto label_return;
1549 }
1550 
1551 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1552 JEMALLOC_ATTR(nonnull(1))
1553 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1554 {
1555 	int ret = imemalign(memptr, alignment, size, sizeof(void *));
1556 	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1557 	    config_prof), false);
1558 	return (ret);
1559 }
1560 
1561 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1562 void JEMALLOC_NOTHROW *
1563 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1564 je_aligned_alloc(size_t alignment, size_t size)
1565 {
1566 	void *ret;
1567 	int err;
1568 
1569 	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1570 		ret = NULL;
1571 		set_errno(err);
1572 	}
1573 	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1574 	    false);
1575 	return (ret);
1576 }
1577 
1578 static void *
1579 icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1580 {
1581 	void *p;
1582 
1583 	if (tctx == NULL)
1584 		return (NULL);
1585 	if (usize <= SMALL_MAXCLASS) {
1586 		p = icalloc(tsd, LARGE_MINCLASS);
1587 		if (p == NULL)
1588 			return (NULL);
1589 		arena_prof_promoted(p, usize);
1590 	} else
1591 		p = icalloc(tsd, usize);
1592 
1593 	return (p);
1594 }
1595 
1596 JEMALLOC_ALWAYS_INLINE_C void *
1597 icalloc_prof(tsd_t *tsd, size_t usize)
1598 {
1599 	void *p;
1600 	prof_tctx_t *tctx;
1601 
1602 	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1603 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1604 		p = icalloc_prof_sample(tsd, usize, tctx);
1605 	else
1606 		p = icalloc(tsd, usize);
1607 	if (unlikely(p == NULL)) {
1608 		prof_alloc_rollback(tsd, tctx, true);
1609 		return (NULL);
1610 	}
1611 	prof_malloc(p, usize, tctx);
1612 
1613 	return (p);
1614 }
1615 
1616 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1617 void JEMALLOC_NOTHROW *
1618 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1619 je_calloc(size_t num, size_t size)
1620 {
1621 	void *ret;
1622 	tsd_t *tsd;
1623 	size_t num_size;
1624 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1625 
1626 	if (unlikely(malloc_init())) {
1627 		num_size = 0;
1628 		ret = NULL;
1629 		goto label_return;
1630 	}
1631 	tsd = tsd_fetch();
1632 
1633 	num_size = num * size;
1634 	if (unlikely(num_size == 0)) {
1635 		if (num == 0 || size == 0)
1636 			num_size = 1;
1637 		else {
1638 			ret = NULL;
1639 			goto label_return;
1640 		}
1641 	/*
1642 	 * Try to avoid division here.  We know that it isn't possible to
1643 	 * overflow during multiplication if neither operand uses any of the
1644 	 * most significant half of the bits in a size_t.
1645 	 */
1646 	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1647 	    2))) && (num_size / size != num))) {
1648 		/* size_t overflow. */
1649 		ret = NULL;
1650 		goto label_return;
1651 	}
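	/*
	 * Concretely (assuming a 64-bit size_t): (sizeof(size_t) << 2) == 32,
	 * so the mask above tests whether either operand uses bits 32..63.
	 * If both num and size are below 2^32, their product is below 2^64
	 * and cannot wrap, so the division-based check is skipped; only when
	 * one operand is "large" is the division performed.
	 */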
1652 
1653 	if (config_prof && opt_prof) {
1654 		usize = s2u(num_size);
1655 		if (unlikely(usize == 0)) {
1656 			ret = NULL;
1657 			goto label_return;
1658 		}
1659 		ret = icalloc_prof(tsd, usize);
1660 	} else {
1661 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1662 			usize = s2u(num_size);
1663 		ret = icalloc(tsd, num_size);
1664 	}
1665 
1666 label_return:
1667 	if (unlikely(ret == NULL)) {
1668 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1669 			malloc_write("<jemalloc>: Error in calloc(): out of "
1670 			    "memory\n");
1671 			abort();
1672 		}
1673 		set_errno(ENOMEM);
1674 	}
1675 	if (config_stats && likely(ret != NULL)) {
1676 		assert(usize == isalloc(ret, config_prof));
1677 		*tsd_thread_allocatedp_get(tsd) += usize;
1678 	}
1679 	UTRACE(0, num_size, ret);
1680 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1681 	return (ret);
1682 }
1683 
1684 static void *
1685 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1686     prof_tctx_t *tctx)
1687 {
1688 	void *p;
1689 
1690 	if (tctx == NULL)
1691 		return (NULL);
1692 	if (usize <= SMALL_MAXCLASS) {
1693 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1694 		if (p == NULL)
1695 			return (NULL);
1696 		arena_prof_promoted(p, usize);
1697 	} else
1698 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1699 
1700 	return (p);
1701 }
1702 
1703 JEMALLOC_ALWAYS_INLINE_C void *
1704 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1705 {
1706 	void *p;
1707 	bool prof_active;
1708 	prof_tctx_t *old_tctx, *tctx;
1709 
1710 	prof_active = prof_active_get_unlocked();
1711 	old_tctx = prof_tctx_get(old_ptr);
1712 	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1713 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1714 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1715 	else
1716 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1717 	if (unlikely(p == NULL)) {
1718 		prof_alloc_rollback(tsd, tctx, true);
1719 		return (NULL);
1720 	}
1721 	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1722 	    old_tctx);
1723 
1724 	return (p);
1725 }
1726 
1727 JEMALLOC_INLINE_C void
1728 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
1729 {
1730 	size_t usize;
1731 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1732 
1733 	assert(ptr != NULL);
1734 	assert(malloc_initialized() || IS_INITIALIZER);
1735 
1736 	if (config_prof && opt_prof) {
1737 		usize = isalloc(ptr, config_prof);
1738 		prof_free(tsd, ptr, usize);
1739 	} else if (config_stats || config_valgrind)
1740 		usize = isalloc(ptr, config_prof);
1741 	if (config_stats)
1742 		*tsd_thread_deallocatedp_get(tsd) += usize;
1743 	if (config_valgrind && unlikely(in_valgrind))
1744 		rzsize = p2rz(ptr);
1745 	iqalloc(tsd, ptr, tcache);
1746 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1747 }
1748 
1749 JEMALLOC_INLINE_C void
1750 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1751 {
1752 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1753 
1754 	assert(ptr != NULL);
1755 	assert(malloc_initialized() || IS_INITIALIZER);
1756 
1757 	if (config_prof && opt_prof)
1758 		prof_free(tsd, ptr, usize);
1759 	if (config_stats)
1760 		*tsd_thread_deallocatedp_get(tsd) += usize;
1761 	if (config_valgrind && unlikely(in_valgrind))
1762 		rzsize = p2rz(ptr);
1763 	isqalloc(tsd, ptr, usize, tcache);
1764 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1765 }
1766 
1767 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1768 void JEMALLOC_NOTHROW *
1769 JEMALLOC_ALLOC_SIZE(2)
1770 je_realloc(void *ptr, size_t size)
1771 {
1772 	void *ret;
1773 	tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1774 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1775 	size_t old_usize = 0;
1776 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1777 
1778 	if (unlikely(size == 0)) {
1779 		if (ptr != NULL) {
1780 			/* realloc(ptr, 0) is equivalent to free(ptr). */
1781 			UTRACE(ptr, 0, 0);
1782 			tsd = tsd_fetch();
1783 			ifree(tsd, ptr, tcache_get(tsd, false));
1784 			return (NULL);
1785 		}
1786 		size = 1;
1787 	}
1788 
1789 	if (likely(ptr != NULL)) {
1790 		assert(malloc_initialized() || IS_INITIALIZER);
1791 		malloc_thread_init();
1792 		tsd = tsd_fetch();
1793 
1794 		old_usize = isalloc(ptr, config_prof);
1795 		if (config_valgrind && unlikely(in_valgrind))
1796 			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1797 
1798 		if (config_prof && opt_prof) {
1799 			usize = s2u(size);
1800 			ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
1801 			    ptr, old_usize, usize);
1802 		} else {
1803 			if (config_stats || (config_valgrind &&
1804 			    unlikely(in_valgrind)))
1805 				usize = s2u(size);
1806 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1807 		}
1808 	} else {
1809 		/* realloc(NULL, size) is equivalent to malloc(size). */
1810 		ret = imalloc_body(size, &tsd, &usize);
1811 	}
1812 
1813 	if (unlikely(ret == NULL)) {
1814 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1815 			malloc_write("<jemalloc>: Error in realloc(): "
1816 			    "out of memory\n");
1817 			abort();
1818 		}
1819 		set_errno(ENOMEM);
1820 	}
1821 	if (config_stats && likely(ret != NULL)) {
1822 		assert(usize == isalloc(ret, config_prof));
1823 		*tsd_thread_allocatedp_get(tsd) += usize;
1824 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
1825 	}
1826 	UTRACE(ptr, size, ret);
1827 	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1828 	    old_rzsize, true, false);
1829 	return (ret);
1830 }
1831 
1832 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1833 je_free(void *ptr)
1834 {
1835 
1836 	UTRACE(ptr, 0, 0);
1837 	if (likely(ptr != NULL)) {
1838 		tsd_t *tsd = tsd_fetch();
1839 		ifree(tsd, ptr, tcache_get(tsd, false));
1840 	}
1841 }
1842 
1843 /*
1844  * End malloc(3)-compatible functions.
1845  */
1846 /******************************************************************************/
1847 /*
1848  * Begin non-standard override functions.
1849  */
1850 
1851 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1852 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1853 void JEMALLOC_NOTHROW *
1854 JEMALLOC_ATTR(malloc)
1855 je_memalign(size_t alignment, size_t size)
1856 {
1857 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1858 	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1859 		ret = NULL;
1860 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1861 	return (ret);
1862 }
1863 #endif
1864 
1865 #ifdef JEMALLOC_OVERRIDE_VALLOC
1866 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1867 void JEMALLOC_NOTHROW *
1868 JEMALLOC_ATTR(malloc)
1869 je_valloc(size_t size)
1870 {
1871 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1872 	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1873 		ret = NULL;
1874 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1875 	return (ret);
1876 }
1877 #endif
1878 
1879 /*
1880  * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1881  * #define je_malloc malloc
1882  */
1883 #define	malloc_is_malloc 1
1884 #define	is_malloc_(a) malloc_is_ ## a
1885 #define	is_malloc(a) is_malloc_(a)
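/*
 * In other words: if jemalloc_defs.h did "#define je_malloc malloc", then
 * is_malloc(je_malloc) expands to malloc_is_malloc, which is 1, and the #if
 * below can enable the glibc hooks; otherwise it expands to the undefined
 * token malloc_is_je_malloc, which evaluates to 0 in the #if.
 */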
1886 
1887 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1888 /*
1889  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1890  * to inconsistently reference libc's malloc(3)-compatible functions
1891  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1892  *
1893  * These definitions interpose hooks in glibc.  The functions are actually
1894  * passed an extra argument for the caller return address, which will be
1895  * ignored.
1896  */
1897 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1898 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1899 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1900 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1901 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1902     je_memalign;
1903 # endif
1904 #endif
1905 
1906 /*
1907  * End non-standard override functions.
1908  */
1909 /******************************************************************************/
1910 /*
1911  * Begin non-standard functions.
1912  */
1913 
1914 JEMALLOC_ALWAYS_INLINE_C bool
1915 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
1916     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1917 {
1918 
1919 	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
1920 		*alignment = 0;
1921 		*usize = s2u(size);
1922 	} else {
1923 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
1924 		*usize = sa2u(size, *alignment);
1925 	}
1926 	assert(*usize != 0);
1927 	*zero = MALLOCX_ZERO_GET(flags);
1928 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
1929 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
1930 			*tcache = NULL;
1931 		else
1932 			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
1933 	} else
1934 		*tcache = tcache_get(tsd, true);
1935 	if ((flags & MALLOCX_ARENA_MASK) != 0) {
1936 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
1937 		*arena = arena_get(tsd, arena_ind, true, true);
1938 		if (unlikely(*arena == NULL))
1939 			return (true);
1940 	} else
1941 		*arena = NULL;
1942 	return (false);
1943 }
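/*
 * Decoding example (an illustrative sketch using the public MALLOCX_*
 * macros):
 *
 *	mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * arrives here with a non-zero lg-align field, so *alignment becomes 64,
 * *usize becomes sa2u(4096, 64), *zero is true, and the defaults are used
 * for both tcache and arena selection.
 */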
1944 
1945 JEMALLOC_ALWAYS_INLINE_C bool
1946 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
1947     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1948 {
1949 
1950 	if (likely(flags == 0)) {
1951 		*usize = s2u(size);
1952 		assert(*usize != 0);
1953 		*alignment = 0;
1954 		*zero = false;
1955 		*tcache = tcache_get(tsd, true);
1956 		*arena = NULL;
1957 		return (false);
1958 	} else {
1959 		return (imallocx_flags_decode_hard(tsd, size, flags, usize,
1960 		    alignment, zero, tcache, arena));
1961 	}
1962 }
1963 
1964 JEMALLOC_ALWAYS_INLINE_C void *
1965 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1966     tcache_t *tcache, arena_t *arena)
1967 {
1968 
1969 	if (unlikely(alignment != 0))
1970 		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
1971 	if (unlikely(zero))
1972 		return (icalloct(tsd, usize, tcache, arena));
1973 	return (imalloct(tsd, usize, tcache, arena));
1974 }
1975 
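/*
 * Sampled allocations need per-allocation profiling metadata, which the
 * arena only maintains for large size classes.  A sampled request that
 * would otherwise be small is therefore served from LARGE_MINCLASS, and
 * arena_prof_promoted() records the requested usize so that size queries
 * and deallocation still see the small size.
 */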
1976 static void *
1977 imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1978     tcache_t *tcache, arena_t *arena)
1979 {
1980 	void *p;
1981 
1982 	if (usize <= SMALL_MAXCLASS) {
1983 		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
1984 		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
1985 		p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
1986 		    arena);
1987 		if (p == NULL)
1988 			return (NULL);
1989 		arena_prof_promoted(p, usize);
1990 	} else
1991 		p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
1992 
1993 	return (p);
1994 }
1995 
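/*
 * The tctx returned by prof_alloc_prep() encodes three cases: the sentinel
 * (prof_tctx_t *)(uintptr_t)1U means "do not sample this allocation" (the
 * common case), any larger pointer is a real context for a sampled
 * allocation, and NULL indicates that preparation failed.  The branches
 * below mirror those cases.
 */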
1996 JEMALLOC_ALWAYS_INLINE_C void *
1997 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
1998 {
1999 	void *p;
2000 	size_t alignment;
2001 	bool zero;
2002 	tcache_t *tcache;
2003 	arena_t *arena;
2004 	prof_tctx_t *tctx;
2005 
2006 	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2007 	    &zero, &tcache, &arena)))
2008 		return (NULL);
2009 	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2010 	if (likely((uintptr_t)tctx == (uintptr_t)1U))
2011 		p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2012 	else if ((uintptr_t)tctx > (uintptr_t)1U) {
2013 		p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2014 		    arena);
2015 	} else
2016 		p = NULL;
2017 	if (unlikely(p == NULL)) {
2018 		prof_alloc_rollback(tsd, tctx, true);
2019 		return (NULL);
2020 	}
2021 	prof_malloc(p, *usize, tctx);
2022 
2023 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2024 	return (p);
2025 }
2026 
2027 JEMALLOC_ALWAYS_INLINE_C void *
2028 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2029 {
2030 	void *p;
2031 	size_t alignment;
2032 	bool zero;
2033 	tcache_t *tcache;
2034 	arena_t *arena;
2035 
2036 	if (likely(flags == 0)) {
2037 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2038 			*usize = s2u(size);
2039 		return (imalloc(tsd, size));
2040 	}
2041 
2042 	if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2043 	    &alignment, &zero, &tcache, &arena)))
2044 		return (NULL);
2045 	p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2046 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2047 	return (p);
2048 }
2049 
2050 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2051 void JEMALLOC_NOTHROW *
2052 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2053 je_mallocx(size_t size, int flags)
2054 {
2055 	tsd_t *tsd;
2056 	void *p;
2057 	size_t usize;
2058 
2059 	assert(size != 0);
2060 
2061 	if (unlikely(malloc_init()))
2062 		goto label_oom;
2063 	tsd = tsd_fetch();
2064 
2065 	if (config_prof && opt_prof)
2066 		p = imallocx_prof(tsd, size, flags, &usize);
2067 	else
2068 		p = imallocx_no_prof(tsd, size, flags, &usize);
2069 	if (unlikely(p == NULL))
2070 		goto label_oom;
2071 
2072 	if (config_stats) {
2073 		assert(usize == isalloc(p, config_prof));
2074 		*tsd_thread_allocatedp_get(tsd) += usize;
2075 	}
2076 	UTRACE(0, size, p);
2077 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2078 	return (p);
2079 label_oom:
2080 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2081 		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2082 		abort();
2083 	}
2084 	UTRACE(0, size, 0);
2085 	return (NULL);
2086 }
2087 
2088 static void *
2089 irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2090     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2091     prof_tctx_t *tctx)
2092 {
2093 	void *p;
2094 
2095 	if (tctx == NULL)
2096 		return (NULL);
2097 	if (usize <= SMALL_MAXCLASS) {
2098 		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2099 		    zero, tcache, arena);
2100 		if (p == NULL)
2101 			return (NULL);
2102 		arena_prof_promoted(p, usize);
2103 	} else {
2104 		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2105 		    tcache, arena);
2106 	}
2107 
2108 	return (p);
2109 }
2110 
2111 JEMALLOC_ALWAYS_INLINE_C void *
2112 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2113     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2114     arena_t *arena)
2115 {
2116 	void *p;
2117 	bool prof_active;
2118 	prof_tctx_t *old_tctx, *tctx;
2119 
2120 	prof_active = prof_active_get_unlocked();
2121 	old_tctx = prof_tctx_get(old_ptr);
2122 	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
2123 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2124 		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2125 		    alignment, zero, tcache, arena, tctx);
2126 	} else {
2127 		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2128 		    tcache, arena);
2129 	}
2130 	if (unlikely(p == NULL)) {
2131 		prof_alloc_rollback(tsd, tctx, true);
2132 		return (NULL);
2133 	}
2134 
2135 	if (p == old_ptr && alignment != 0) {
2136 		/*
2137 		 * The allocation did not move, so it is possible that the size
2138 		 * class is smaller than would guarantee the requested
2139 		 * alignment, and that the alignment constraint was
2140 		 * serendipitously satisfied.  Additionally, old_usize may not
2141 		 * be the same as the current usize because of in-place large
2142 		 * reallocation.  Therefore, query the actual value of usize.
2143 		 */
2144 		*usize = isalloc(p, config_prof);
2145 	}
2146 	prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2147 	    old_usize, old_tctx);
2148 
2149 	return (p);
2150 }
2151 
2152 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2153 void JEMALLOC_NOTHROW *
2154 JEMALLOC_ALLOC_SIZE(2)
2155 je_rallocx(void *ptr, size_t size, int flags)
2156 {
2157 	void *p;
2158 	tsd_t *tsd;
2159 	size_t usize;
2160 	size_t old_usize;
2161 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2162 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2163 	bool zero = flags & MALLOCX_ZERO;
2164 	arena_t *arena;
2165 	tcache_t *tcache;
2166 
2167 	assert(ptr != NULL);
2168 	assert(size != 0);
2169 	assert(malloc_initialized() || IS_INITIALIZER);
2170 	malloc_thread_init();
2171 	tsd = tsd_fetch();
2172 
2173 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2174 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2175 		arena = arena_get(tsd, arena_ind, true, true);
2176 		if (unlikely(arena == NULL))
2177 			goto label_oom;
2178 	} else
2179 		arena = NULL;
2180 
2181 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2182 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2183 			tcache = NULL;
2184 		else
2185 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2186 	} else
2187 		tcache = tcache_get(tsd, true);
2188 
2189 	old_usize = isalloc(ptr, config_prof);
2190 	if (config_valgrind && unlikely(in_valgrind))
2191 		old_rzsize = u2rz(old_usize);
2192 
2193 	if (config_prof && opt_prof) {
2194 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2195 		assert(usize != 0);
2196 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2197 		    zero, tcache, arena);
2198 		if (unlikely(p == NULL))
2199 			goto label_oom;
2200 	} else {
2201 		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2202 		     tcache, arena);
2203 		if (unlikely(p == NULL))
2204 			goto label_oom;
2205 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2206 			usize = isalloc(p, config_prof);
2207 	}
2208 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2209 
2210 	if (config_stats) {
2211 		*tsd_thread_allocatedp_get(tsd) += usize;
2212 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2213 	}
2214 	UTRACE(ptr, size, p);
2215 	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2216 	    old_rzsize, false, zero);
2217 	return (p);
2218 label_oom:
2219 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2220 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2221 		abort();
2222 	}
2223 	UTRACE(ptr, size, 0);
2224 	return (NULL);
2225 }
2226 
2227 JEMALLOC_ALWAYS_INLINE_C size_t
2228 ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
2229     size_t alignment, bool zero)
2230 {
2231 	size_t usize;
2232 
2233 	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
2234 		return (old_usize);
2235 	usize = isalloc(ptr, config_prof);
2236 
2237 	return (usize);
2238 }
2239 
2240 static size_t
2241 ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
2242     size_t alignment, bool zero, prof_tctx_t *tctx)
2243 {
2244 	size_t usize;
2245 
2246 	if (tctx == NULL)
2247 		return (old_usize);
2248 	usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
2249 
2250 	return (usize);
2251 }
2252 
2253 JEMALLOC_ALWAYS_INLINE_C size_t
2254 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2255     size_t extra, size_t alignment, bool zero)
2256 {
2257 	size_t usize_max, usize;
2258 	bool prof_active;
2259 	prof_tctx_t *old_tctx, *tctx;
2260 
2261 	prof_active = prof_active_get_unlocked();
2262 	old_tctx = prof_tctx_get(ptr);
2263 	/*
2264 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2265 	 * Therefore, compute its maximum possible value and use that in
2266 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2267 	 * prof_realloc() will use the actual usize to decide whether to sample.
2268 	 */
2269 	usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
2270 	    alignment);
2271 	assert(usize_max != 0);
2272 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2273 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2274 		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
2275 		    alignment, zero, tctx);
2276 	} else {
2277 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2278 		    zero);
2279 	}
2280 	if (usize == old_usize) {
2281 		prof_alloc_rollback(tsd, tctx, false);
2282 		return (usize);
2283 	}
2284 	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2285 	    old_tctx);
2286 
2287 	return (usize);
2288 }
2289 
2290 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2291 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2292 {
2293 	tsd_t *tsd;
2294 	size_t usize, old_usize;
2295 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2296 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2297 	bool zero = flags & MALLOCX_ZERO;
2298 
2299 	assert(ptr != NULL);
2300 	assert(size != 0);
2301 	assert(SIZE_T_MAX - size >= extra);
2302 	assert(malloc_initialized() || IS_INITIALIZER);
2303 	malloc_thread_init();
2304 	tsd = tsd_fetch();
2305 
2306 	old_usize = isalloc(ptr, config_prof);
2307 
2308 	/* Clamp extra if necessary to avoid (size + extra) overflow. */
2309 	if (unlikely(size + extra > HUGE_MAXCLASS)) {
2310 		/* Check for size overflow. */
2311 		if (unlikely(size > HUGE_MAXCLASS)) {
2312 			usize = old_usize;
2313 			goto label_not_resized;
2314 		}
2315 		extra = HUGE_MAXCLASS - size;
2316 	}
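	/*
	 * For example (illustrative): with size == HUGE_MAXCLASS - 1 and
	 * extra == 4096, extra is clamped to 1 so that size + extra cannot
	 * exceed HUGE_MAXCLASS (and therefore cannot wrap around size_t).
	 */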
2317 
2318 	if (config_valgrind && unlikely(in_valgrind))
2319 		old_rzsize = u2rz(old_usize);
2320 
2321 	if (config_prof && opt_prof) {
2322 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2323 		    alignment, zero);
2324 	} else {
2325 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2326 		    zero);
2327 	}
2328 	if (unlikely(usize == old_usize))
2329 		goto label_not_resized;
2330 
2331 	if (config_stats) {
2332 		*tsd_thread_allocatedp_get(tsd) += usize;
2333 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2334 	}
2335 	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2336 	    old_rzsize, false, zero);
2337 label_not_resized:
2338 	UTRACE(ptr, size, ptr);
2339 	return (usize);
2340 }
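/*
 * Caller-side sketch (illustrative, not from this file): xallocx() only
 * ever resizes in place, so applications typically compare the returned
 * size against what they asked for and fall back to a moving reallocation
 * when the request could not be satisfied:
 *
 *	if (xallocx(ptr, size, 0, flags) < size)
 *		ptr = rallocx(ptr, size, flags);
 */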
2341 
2342 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2343 JEMALLOC_ATTR(pure)
2344 je_sallocx(const void *ptr, int flags)
2345 {
2346 	size_t usize;
2347 
2348 	assert(malloc_initialized() || IS_INITIALIZER);
2349 	malloc_thread_init();
2350 
2351 	if (config_ivsalloc)
2352 		usize = ivsalloc(ptr, config_prof);
2353 	else
2354 		usize = isalloc(ptr, config_prof);
2355 
2356 	return (usize);
2357 }
2358 
2359 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2360 je_dallocx(void *ptr, int flags)
2361 {
2362 	tsd_t *tsd;
2363 	tcache_t *tcache;
2364 
2365 	assert(ptr != NULL);
2366 	assert(malloc_initialized() || IS_INITIALIZER);
2367 
2368 	tsd = tsd_fetch();
2369 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2370 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2371 			tcache = NULL;
2372 		else
2373 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2374 	} else
2375 		tcache = tcache_get(tsd, false);
2376 
2377 	UTRACE(ptr, 0, 0);
2378 	ifree(tsd_fetch(), ptr, tcache);
2379 }
2380 
2381 JEMALLOC_ALWAYS_INLINE_C size_t
2382 inallocx(size_t size, int flags)
2383 {
2384 	size_t usize;
2385 
2386 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2387 		usize = s2u(size);
2388 	else
2389 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2390 	assert(usize != 0);
2391 	return (usize);
2392 }
2393 
2394 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2395 je_sdallocx(void *ptr, size_t size, int flags)
2396 {
2397 	tsd_t *tsd;
2398 	tcache_t *tcache;
2399 	size_t usize;
2400 
2401 	assert(ptr != NULL);
2402 	assert(malloc_initialized() || IS_INITIALIZER);
2403 	usize = inallocx(size, flags);
2404 	assert(usize == isalloc(ptr, config_prof));
2405 
2406 	tsd = tsd_fetch();
2407 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2408 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2409 			tcache = NULL;
2410 		else
2411 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2412 	} else
2413 		tcache = tcache_get(tsd, false);
2414 
2415 	UTRACE(ptr, 0, 0);
2416 	isfree(tsd, ptr, usize, tcache);
2417 }
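/*
 * Caller-side sketch (illustrative): the size/flags pair passed to
 * sdallocx() must describe the existing allocation, e.g.
 *
 *	void *p = mallocx(n, flags);
 *	...
 *	sdallocx(p, n, flags);
 *
 * so that inallocx(size, flags) above agrees with isalloc(ptr).
 */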
2418 
2419 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2420 JEMALLOC_ATTR(pure)
2421 je_nallocx(size_t size, int flags)
2422 {
2423 
2424 	assert(size != 0);
2425 
2426 	if (unlikely(malloc_init()))
2427 		return (0);
2428 
2429 	return (inallocx(size, flags));
2430 }
2431 
2432 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2433 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2434     size_t newlen)
2435 {
2436 
2437 	if (unlikely(malloc_init()))
2438 		return (EAGAIN);
2439 
2440 	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2441 }
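/*
 * Usage sketch (illustrative): reading a value through the control
 * namespace, e.g. the number of automatically managed arenas:
 *
 *	unsigned narenas;
 *	size_t len = sizeof(narenas);
 *	if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0)
 *		use(narenas);
 *
 * Passing non-NULL newp/newlen instead writes a value, and both forms can
 * be combined to read the old value and set a new one in a single call.
 */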
2442 
2443 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2444 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2445 {
2446 
2447 	if (unlikely(malloc_init()))
2448 		return (EAGAIN);
2449 
2450 	return (ctl_nametomib(name, mibp, miblenp));
2451 }
2452 
2453 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2454 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2455   void *newp, size_t newlen)
2456 {
2457 
2458 	if (unlikely(malloc_init()))
2459 		return (EAGAIN);
2460 
2461 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2462 }
2463 
2464 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2465 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2466     const char *opts)
2467 {
2468 
2469 	stats_print(write_cb, cbopaque, opts);
2470 }
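/*
 * Usage sketch (illustrative): passing NULL for write_cb makes
 * stats_print() fall back to malloc_message(), which writes to stderr by
 * default, so the simplest invocation is:
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 *
 * A custom write_cb receives cbopaque back verbatim, which lets callers
 * accumulate the report into a buffer of their own.
 */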
2471 
2472 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2473 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2474 {
2475 	size_t ret;
2476 
2477 	assert(malloc_initialized() || IS_INITIALIZER);
2478 	malloc_thread_init();
2479 
2480 	if (config_ivsalloc)
2481 		ret = ivsalloc(ptr, config_prof);
2482 	else
2483 		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2484 
2485 	return (ret);
2486 }
2487 
2488 /*
2489  * End non-standard functions.
2490  */
2491 /******************************************************************************/
2492 /*
2493  * The following functions are used by threading libraries for protection of
2494  * malloc during fork().
2495  */
2496 
2497 /*
2498  * If an application creates a thread before doing any allocation in the main
2499  * thread, then calls fork(2) in the main thread followed by memory allocation
2500  * in the child process, a race can occur that results in deadlock within the
2501  * child: the main thread may have forked while the created thread had
2502  * partially initialized the allocator.  Ordinarily jemalloc prevents
2503  * fork/malloc races via the following functions it registers during
2504  * initialization using pthread_atfork(), but of course that does no good if
2505  * the allocator isn't fully initialized at fork time.  The following library
2506  * constructor is a partial solution to this problem.  It may still be possible
2507  * to trigger the deadlock described above, but doing so would involve forking
2508  * via a library constructor that runs before jemalloc's runs.
2509  */
2510 JEMALLOC_ATTR(constructor)
2511 static void
2512 jemalloc_constructor(void)
2513 {
2514 
2515 	malloc_init();
2516 }
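/*
 * Registration sketch (illustrative; the actual call sites are earlier in
 * this file and are platform dependent): on systems without
 * JEMALLOC_MUTEX_INIT_CB, initialization registers the handlers roughly as
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * whereas JEMALLOC_MUTEX_INIT_CB platforms (e.g. FreeBSD) expect the
 * threading library to call the exported _malloc_prefork() and
 * _malloc_postfork() entry points directly.
 */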
2517 
2518 #ifndef JEMALLOC_MUTEX_INIT_CB
2519 void
2520 jemalloc_prefork(void)
2521 #else
2522 JEMALLOC_EXPORT void
2523 _malloc_prefork(void)
2524 #endif
2525 {
2526 	unsigned i;
2527 
2528 #ifdef JEMALLOC_MUTEX_INIT_CB
2529 	if (!malloc_initialized())
2530 		return;
2531 #endif
2532 	assert(malloc_initialized());
2533 
2534 	/* Acquire all mutexes in a safe order. */
2535 	ctl_prefork();
2536 	prof_prefork();
2537 	malloc_mutex_prefork(&arenas_lock);
2538 	for (i = 0; i < narenas_total; i++) {
2539 		if (arenas[i] != NULL)
2540 			arena_prefork(arenas[i]);
2541 	}
2542 	chunk_prefork();
2543 	base_prefork();
2544 }
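/*
 * The postfork handlers below release these mutexes in the reverse of the
 * acquisition order used here (base, chunk, per-arena locks, arenas_lock,
 * prof, ctl).
 */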
2545 
2546 #ifndef JEMALLOC_MUTEX_INIT_CB
2547 void
2548 jemalloc_postfork_parent(void)
2549 #else
2550 JEMALLOC_EXPORT void
2551 _malloc_postfork(void)
2552 #endif
2553 {
2554 	unsigned i;
2555 
2556 #ifdef JEMALLOC_MUTEX_INIT_CB
2557 	if (!malloc_initialized())
2558 		return;
2559 #endif
2560 	assert(malloc_initialized());
2561 
2562 	/* Release all mutexes, now that fork() has completed. */
2563 	base_postfork_parent();
2564 	chunk_postfork_parent();
2565 	for (i = 0; i < narenas_total; i++) {
2566 		if (arenas[i] != NULL)
2567 			arena_postfork_parent(arenas[i]);
2568 	}
2569 	malloc_mutex_postfork_parent(&arenas_lock);
2570 	prof_postfork_parent();
2571 	ctl_postfork_parent();
2572 }
2573 
2574 void
2575 jemalloc_postfork_child(void)
2576 {
2577 	unsigned i;
2578 
2579 	assert(malloc_initialized());
2580 
2581 	/* Release all mutexes, now that fork() has completed. */
2582 	base_postfork_child();
2583 	chunk_postfork_child();
2584 	for (i = 0; i < narenas_total; i++) {
2585 		if (arenas[i] != NULL)
2586 			arena_postfork_child(arenas[i]);
2587 	}
2588 	malloc_mutex_postfork_child(&arenas_lock);
2589 	prof_postfork_child();
2590 	ctl_postfork_child();
2591 }
2592 
2593 /******************************************************************************/
2594