1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/log.h"
12 #include "jemalloc/internal/malloc_io.h"
13 #include "jemalloc/internal/mutex.h"
14 #include "jemalloc/internal/rtree.h"
15 #include "jemalloc/internal/size_classes.h"
16 #include "jemalloc/internal/spin.h"
17 #include "jemalloc/internal/sz.h"
18 #include "jemalloc/internal/ticker.h"
19 #include "jemalloc/internal/util.h"
20
21 /******************************************************************************/
22 /* Data. */
23
24 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
25 const char *__malloc_options_1_0 = NULL;
26 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
27
28 /* Runtime configuration options. */
29 const char *je_malloc_conf
30 #ifndef _WIN32
31 JEMALLOC_ATTR(weak)
32 #endif
33 ;
34 bool opt_abort =
35 #ifdef JEMALLOC_DEBUG
36 true
37 #else
38 false
39 #endif
40 ;
41 bool opt_abort_conf =
42 #ifdef JEMALLOC_DEBUG
43 true
44 #else
45 false
46 #endif
47 ;
48 const char *opt_junk =
49 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
50 "true"
51 #else
52 "false"
53 #endif
54 ;
55 bool opt_junk_alloc =
56 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
57 true
58 #else
59 false
60 #endif
61 ;
62 bool opt_junk_free =
63 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
64 true
65 #else
66 false
67 #endif
68 ;
69
70 bool opt_utrace = false;
71 bool opt_xmalloc = false;
72 bool opt_zero = false;
73 unsigned opt_narenas = 0;
74
75 unsigned ncpus;
76
77 /* Protects arenas initialization. */
78 malloc_mutex_t arenas_lock;
79 /*
80 * Arenas that are used to service external requests. Not all elements of the
81 * arenas array are necessarily used; arenas are created lazily as needed.
82 *
83 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
84 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
85 * takes some action to create them and allocate from them.
86 *
87 * Points to an arena_t.
88 */
89 JEMALLOC_ALIGNED(CACHELINE)
90 atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
91 static atomic_u_t narenas_total; /* Use narenas_total_*(). */
92 static arena_t *a0; /* arenas[0]; read-only after initialization. */
93 unsigned narenas_auto; /* Read-only after initialization. */
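/*
 * Illustrative example (editorial note, not part of the original source):
 * on a run where narenas_auto ends up as 4, threads are automatically
 * multiplexed over arenas[0..4).  A later "arenas.create" mallctl would
 * initialize arenas[4] and bump narenas_total to 5, but that arena is only
 * used by threads that explicitly opt into it, e.g. via the MALLOCX_ARENA()
 * flag or the "thread.arena" mallctl.
 */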
94
95 typedef enum {
96 malloc_init_uninitialized = 3,
97 malloc_init_a0_initialized = 2,
98 malloc_init_recursible = 1,
99 malloc_init_initialized = 0 /* Common case --> jnz. */
100 } malloc_init_t;
101 static malloc_init_t malloc_init_state = malloc_init_uninitialized;
102
103 /* False should be the common case. Set to true to trigger initialization. */
104 bool malloc_slow = true;
105
106 /* When malloc_slow is true, set the corresponding bits for sanity check. */
107 enum {
108 flag_opt_junk_alloc = (1U),
109 flag_opt_junk_free = (1U << 1),
110 flag_opt_zero = (1U << 2),
111 flag_opt_utrace = (1U << 3),
112 flag_opt_xmalloc = (1U << 4)
113 };
114 static uint8_t malloc_slow_flags;
115
116 #ifdef JEMALLOC_THREADED_INIT
117 /* Used to let the initializing thread recursively allocate. */
118 # define NO_INITIALIZER ((unsigned long)0)
119 # define INITIALIZER pthread_self()
120 # define IS_INITIALIZER (malloc_initializer == pthread_self())
121 static pthread_t malloc_initializer = NO_INITIALIZER;
122 #else
123 # define NO_INITIALIZER false
124 # define INITIALIZER true
125 # define IS_INITIALIZER malloc_initializer
126 static bool malloc_initializer = NO_INITIALIZER;
127 #endif
128
129 /* Used to avoid initialization races. */
130 #ifdef _WIN32
131 #if _WIN32_WINNT >= 0x0600
132 static malloc_mutex_t init_lock = SRWLOCK_INIT;
133 #else
134 static malloc_mutex_t init_lock;
135 static bool init_lock_initialized = false;
136
137 JEMALLOC_ATTR(constructor)
138 static void WINAPI
139 _init_init_lock(void) {
140 /*
141 * If another constructor in the same binary is using mallctl to e.g.
142 * set up extent hooks, it may end up running before this one, and
143 * malloc_init_hard will crash trying to lock the uninitialized lock. So
144 * we force an initialization of the lock in malloc_init_hard as well.
145 * We don't try to care about atomicity of the accesses to the
146 * init_lock_initialized boolean, since it really only matters early in
147 * the process creation, before any separate thread normally starts
148 * doing anything.
149 */
150 if (!init_lock_initialized) {
151 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
152 malloc_mutex_rank_exclusive);
153 }
154 init_lock_initialized = true;
155 }
156
157 #ifdef _MSC_VER
158 # pragma section(".CRT$XCU", read)
159 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
160 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
161 #endif
162 #endif
163 #else
164 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
165 #endif
166
167 typedef struct {
168 void *p; /* Input pointer (as in realloc(p, s)). */
169 size_t s; /* Request size. */
170 void *r; /* Result pointer. */
171 } malloc_utrace_t;
172
173 #ifdef JEMALLOC_UTRACE
174 # define UTRACE(a, b, c) do { \
175 if (unlikely(opt_utrace)) { \
176 int utrace_serrno = errno; \
177 malloc_utrace_t ut; \
178 ut.p = (a); \
179 ut.s = (b); \
180 ut.r = (c); \
181 utrace(&ut, sizeof(ut)); \
182 errno = utrace_serrno; \
183 } \
184 } while (0)
185 #else
186 # define UTRACE(a, b, c)
187 #endif
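/*
 * Usage sketch (editorial note, not from the original source): the
 * allocation paths below record (old pointer, size, result) triples, e.g.
 * UTRACE(0, size, ret) after a malloc-style allocation and UTRACE(ptr, 0, 0)
 * on deallocation (in je_free(), outside this excerpt).  With opt_utrace
 * enabled on a JEMALLOC_UTRACE build, each record is handed to utrace(2)
 * while errno is saved and restored around the call.
 */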
188
189 /* Whether encountered any invalid config options. */
190 static bool had_conf_error = false;
191
192 /******************************************************************************/
193 /*
194 * Function prototypes for static functions that are referenced prior to
195 * definition.
196 */
197
198 static bool malloc_init_hard_a0(void);
199 static bool malloc_init_hard(void);
200
201 /******************************************************************************/
202 /*
203 * Begin miscellaneous support functions.
204 */
205
206 bool
207 malloc_initialized(void) {
208 return (malloc_init_state == malloc_init_initialized);
209 }
210
211 JEMALLOC_ALWAYS_INLINE bool
212 malloc_init_a0(void) {
213 if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
214 return malloc_init_hard_a0();
215 }
216 return false;
217 }
218
219 JEMALLOC_ALWAYS_INLINE bool
220 malloc_init(void) {
221 if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
222 return true;
223 }
224 return false;
225 }
226
227 /*
228 * The a0*() functions are used instead of i{d,}alloc() in situations that
229 * cannot tolerate TLS variable access.
230 */
231
232 static void *
233 a0ialloc(size_t size, bool zero, bool is_internal) {
234 if (unlikely(malloc_init_a0())) {
235 return NULL;
236 }
237
238 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
239 is_internal, arena_get(TSDN_NULL, 0, true), true);
240 }
241
242 static void
243 a0idalloc(void *ptr, bool is_internal) {
244 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
245 }
246
247 void *
248 a0malloc(size_t size) {
249 return a0ialloc(size, false, true);
250 }
251
252 void
253 a0dalloc(void *ptr) {
254 a0idalloc(ptr, true);
255 }
256
257 /*
258 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
259 * situations that cannot tolerate TLS variable access (TLS allocation and very
260 * early internal data structure initialization).
261 */
262
263 void *
264 bootstrap_malloc(size_t size) {
265 if (unlikely(size == 0)) {
266 size = 1;
267 }
268
269 return a0ialloc(size, false, false);
270 }
271
272 void *
273 bootstrap_calloc(size_t num, size_t size) {
274 size_t num_size;
275
276 num_size = num * size;
277 if (unlikely(num_size == 0)) {
278 assert(num == 0 || size == 0);
279 num_size = 1;
280 }
281
282 return a0ialloc(num_size, true, false);
283 }
284
285 void
286 bootstrap_free(void *ptr) {
287 if (unlikely(ptr == NULL)) {
288 return;
289 }
290
291 a0idalloc(ptr, false);
292 }
293
294 void
295 arena_set(unsigned ind, arena_t *arena) {
296 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
297 }
298
299 static void
300 narenas_total_set(unsigned narenas) {
301 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
302 }
303
304 static void
305 narenas_total_inc(void) {
306 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
307 }
308
309 unsigned
310 narenas_total_get(void) {
311 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
312 }
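/*
 * Editorial note: arena_set() publishes the arena pointer with a release
 * store; the matching acquire load lives in arena_get() (an inline in the
 * internal headers), so a reader that observes a non-NULL arenas[ind] also
 * observes the fully initialized arena_t it points to.  narenas_total uses
 * the same release/acquire pairing via narenas_total_set()/_inc() and
 * narenas_total_get() above.
 */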
313
314 /* Create a new arena and insert it into the arenas array at index ind. */
315 static arena_t *
316 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
317 arena_t *arena;
318
319 assert(ind <= narenas_total_get());
320 if (ind >= MALLOCX_ARENA_LIMIT) {
321 return NULL;
322 }
323 if (ind == narenas_total_get()) {
324 narenas_total_inc();
325 }
326
327 /*
328 * Another thread may have already initialized arenas[ind] if it's an
329 * auto arena.
330 */
331 arena = arena_get(tsdn, ind, false);
332 if (arena != NULL) {
333 assert(ind < narenas_auto);
334 return arena;
335 }
336
337 /* Actually initialize the arena. */
338 arena = arena_new(tsdn, ind, extent_hooks);
339
340 return arena;
341 }
342
343 static void
344 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
345 if (ind == 0) {
346 return;
347 }
348 if (have_background_thread) {
349 bool err;
350 malloc_mutex_lock(tsdn, &background_thread_lock);
351 err = background_thread_create(tsdn_tsd(tsdn), ind);
352 malloc_mutex_unlock(tsdn, &background_thread_lock);
353 if (err) {
354 malloc_printf("<jemalloc>: error in background thread "
355 "creation for arena %u. Abort.\n", ind);
356 abort();
357 }
358 }
359 }
360
361 arena_t *
362 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
363 arena_t *arena;
364
365 malloc_mutex_lock(tsdn, &arenas_lock);
366 arena = arena_init_locked(tsdn, ind, extent_hooks);
367 malloc_mutex_unlock(tsdn, &arenas_lock);
368
369 arena_new_create_background_thread(tsdn, ind);
370
371 return arena;
372 }
373
374 static void
375 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
376 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
377 arena_nthreads_inc(arena, internal);
378
379 if (internal) {
380 tsd_iarena_set(tsd, arena);
381 } else {
382 tsd_arena_set(tsd, arena);
383 }
384 }
385
386 void
387 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
388 arena_t *oldarena, *newarena;
389
390 oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
391 newarena = arena_get(tsd_tsdn(tsd), newind, false);
392 arena_nthreads_dec(oldarena, false);
393 arena_nthreads_inc(newarena, false);
394 tsd_arena_set(tsd, newarena);
395 }
396
397 static void
398 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
399 arena_t *arena;
400
401 arena = arena_get(tsd_tsdn(tsd), ind, false);
402 arena_nthreads_dec(arena, internal);
403
404 if (internal) {
405 tsd_iarena_set(tsd, NULL);
406 } else {
407 tsd_arena_set(tsd, NULL);
408 }
409 }
410
411 arena_tdata_t *
412 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
413 arena_tdata_t *tdata, *arenas_tdata_old;
414 arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
415 unsigned narenas_tdata_old, i;
416 unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
417 unsigned narenas_actual = narenas_total_get();
418
419 /*
420 * Dissociate old tdata array (and set up for deallocation upon return)
421 * if it's too small.
422 */
423 if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
424 arenas_tdata_old = arenas_tdata;
425 narenas_tdata_old = narenas_tdata;
426 arenas_tdata = NULL;
427 narenas_tdata = 0;
428 tsd_arenas_tdata_set(tsd, arenas_tdata);
429 tsd_narenas_tdata_set(tsd, narenas_tdata);
430 } else {
431 arenas_tdata_old = NULL;
432 narenas_tdata_old = 0;
433 }
434
435 /* Allocate tdata array if it's missing. */
436 if (arenas_tdata == NULL) {
437 bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
438 narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
439
440 if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
441 *arenas_tdata_bypassp = true;
442 arenas_tdata = (arena_tdata_t *)a0malloc(
443 sizeof(arena_tdata_t) * narenas_tdata);
444 *arenas_tdata_bypassp = false;
445 }
446 if (arenas_tdata == NULL) {
447 tdata = NULL;
448 goto label_return;
449 }
450 assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
451 tsd_arenas_tdata_set(tsd, arenas_tdata);
452 tsd_narenas_tdata_set(tsd, narenas_tdata);
453 }
454
455 /*
456 * Copy to tdata array. It's possible that the actual number of arenas
457 * has increased since narenas_total_get() was called above, but that
458 * causes no correctness issues unless two threads concurrently execute
459 * the arenas.create mallctl, which we trust mallctl synchronization to
460 * prevent.
461 */
462
463 /* Copy/initialize tickers. */
464 for (i = 0; i < narenas_actual; i++) {
465 if (i < narenas_tdata_old) {
466 ticker_copy(&arenas_tdata[i].decay_ticker,
467 &arenas_tdata_old[i].decay_ticker);
468 } else {
469 ticker_init(&arenas_tdata[i].decay_ticker,
470 DECAY_NTICKS_PER_UPDATE);
471 }
472 }
473 if (narenas_tdata > narenas_actual) {
474 memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
475 * (narenas_tdata - narenas_actual));
476 }
477
478 /* Read the refreshed tdata array. */
479 tdata = &arenas_tdata[ind];
480 label_return:
481 if (arenas_tdata_old != NULL) {
482 a0dalloc(arenas_tdata_old);
483 }
484 return tdata;
485 }
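/*
 * Worked example (editorial, not from the original source): suppose a
 * thread's cached tdata array holds 4 entries while narenas_actual is now 8
 * and the caller asks for ind == 6.  The old array is detached, a new one
 * of 8 entries is allocated through a0malloc() (with the bypass flag set to
 * avoid recursing into this path), tickers [0..4) are copied from the old
 * array, tickers [4..8) are freshly initialized, and the old array is freed
 * at label_return.
 */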
486
487 /* Slow path, called only by arena_choose(). */
488 arena_t *
489 arena_choose_hard(tsd_t *tsd, bool internal) {
490 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
491
492 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
493 unsigned choose = percpu_arena_choose();
494 ret = arena_get(tsd_tsdn(tsd), choose, true);
495 assert(ret != NULL);
496 arena_bind(tsd, arena_ind_get(ret), false);
497 arena_bind(tsd, arena_ind_get(ret), true);
498
499 return ret;
500 }
501
502 if (narenas_auto > 1) {
503 unsigned i, j, choose[2], first_null;
504 bool is_new_arena[2];
505
506 /*
507 * Determine binding for both non-internal and internal
508 * allocation.
509 *
510 * choose[0]: For application allocation.
511 * choose[1]: For internal metadata allocation.
512 */
513
514 for (j = 0; j < 2; j++) {
515 choose[j] = 0;
516 is_new_arena[j] = false;
517 }
518
519 first_null = narenas_auto;
520 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
521 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
522 for (i = 1; i < narenas_auto; i++) {
523 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
524 /*
525 * Choose the first arena that has the lowest
526 * number of threads assigned to it.
527 */
528 for (j = 0; j < 2; j++) {
529 if (arena_nthreads_get(arena_get(
530 tsd_tsdn(tsd), i, false), !!j) <
531 arena_nthreads_get(arena_get(
532 tsd_tsdn(tsd), choose[j], false),
533 !!j)) {
534 choose[j] = i;
535 }
536 }
537 } else if (first_null == narenas_auto) {
538 /*
539 * Record the index of the first uninitialized
540 * arena, in case all extant arenas are in use.
541 *
542 * NB: It is possible for there to be
543 * discontinuities in terms of initialized
544 * versus uninitialized arenas, due to the
545 * "thread.arena" mallctl.
546 */
547 first_null = i;
548 }
549 }
550
551 for (j = 0; j < 2; j++) {
552 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
553 choose[j], false), !!j) == 0 || first_null ==
554 narenas_auto) {
555 /*
556 * Use an unloaded arena, or the least loaded
557 * arena if all arenas are already initialized.
558 */
559 if (!!j == internal) {
560 ret = arena_get(tsd_tsdn(tsd),
561 choose[j], false);
562 }
563 } else {
564 arena_t *arena;
565
566 /* Initialize a new arena. */
567 choose[j] = first_null;
568 arena = arena_init_locked(tsd_tsdn(tsd),
569 choose[j],
570 (extent_hooks_t *)&extent_hooks_default);
571 if (arena == NULL) {
572 malloc_mutex_unlock(tsd_tsdn(tsd),
573 &arenas_lock);
574 return NULL;
575 }
576 is_new_arena[j] = true;
577 if (!!j == internal) {
578 ret = arena;
579 }
580 }
581 arena_bind(tsd, choose[j], !!j);
582 }
583 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
584
585 for (j = 0; j < 2; j++) {
586 if (is_new_arena[j]) {
587 assert(choose[j] > 0);
588 arena_new_create_background_thread(
589 tsd_tsdn(tsd), choose[j]);
590 }
591 }
592
593 } else {
594 ret = arena_get(tsd_tsdn(tsd), 0, false);
595 arena_bind(tsd, 0, false);
596 arena_bind(tsd, 0, true);
597 }
598
599 return ret;
600 }
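/*
 * Illustrative walk-through (editorial note): with percpu_arena disabled,
 * narenas_auto == 4, and only arena 0 initialized, a thread making its
 * first allocation scans arenas 1..3, finds them all NULL, records
 * first_null == 1, and therefore initializes arena 1 via
 * arena_init_locked() and binds to it for application allocations; the
 * second (j == 1) pass makes the analogous choice for internal metadata
 * allocations.  Once every auto arena exists, threads instead bind to
 * whichever arena currently has the fewest threads of the corresponding
 * kind.
 */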
601
602 void
603 iarena_cleanup(tsd_t *tsd) {
604 arena_t *iarena;
605
606 iarena = tsd_iarena_get(tsd);
607 if (iarena != NULL) {
608 arena_unbind(tsd, arena_ind_get(iarena), true);
609 }
610 }
611
612 void
613 arena_cleanup(tsd_t *tsd) {
614 arena_t *arena;
615
616 arena = tsd_arena_get(tsd);
617 if (arena != NULL) {
618 arena_unbind(tsd, arena_ind_get(arena), false);
619 }
620 }
621
622 void
623 arenas_tdata_cleanup(tsd_t *tsd) {
624 arena_tdata_t *arenas_tdata;
625
626 /* Prevent tsd->arenas_tdata from being (re)created. */
627 *tsd_arenas_tdata_bypassp_get(tsd) = true;
628
629 arenas_tdata = tsd_arenas_tdata_get(tsd);
630 if (arenas_tdata != NULL) {
631 tsd_arenas_tdata_set(tsd, NULL);
632 a0dalloc(arenas_tdata);
633 }
634 }
635
636 static void
637 stats_print_atexit(void) {
638 if (config_stats) {
639 tsdn_t *tsdn;
640 unsigned narenas, i;
641
642 tsdn = tsdn_fetch();
643
644 /*
645 * Merge stats from extant threads. This is racy, since
646 * individual threads do not lock when recording tcache stats
647 * events. As a consequence, the final stats may be slightly
648 * out of date by the time they are reported, if other threads
649 * continue to allocate.
650 */
651 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
652 arena_t *arena = arena_get(tsdn, i, false);
653 if (arena != NULL) {
654 tcache_t *tcache;
655
656 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
657 ql_foreach(tcache, &arena->tcache_ql, link) {
658 tcache_stats_merge(tsdn, tcache, arena);
659 }
660 malloc_mutex_unlock(tsdn,
661 &arena->tcache_ql_mtx);
662 }
663 }
664 }
665 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
666 }
667
668 /*
669 * Ensure that we don't hold any locks upon entry to or exit from allocator
670 * code (in a "broad" sense that doesn't count a reentrant allocation as an
671 * entrance or exit).
672 */
673 JEMALLOC_ALWAYS_INLINE void
674 check_entry_exit_locking(tsdn_t *tsdn) {
675 if (!config_debug) {
676 return;
677 }
678 if (tsdn_null(tsdn)) {
679 return;
680 }
681 tsd_t *tsd = tsdn_tsd(tsdn);
682 /*
683 * It's possible we hold locks at entry/exit if we're in a nested
684 * allocation.
685 */
686 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
687 if (reentrancy_level != 0) {
688 return;
689 }
690 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
691 }
692
693 /*
694 * End miscellaneous support functions.
695 */
696 /******************************************************************************/
697 /*
698 * Begin initialization functions.
699 */
700
701 static char *
702 jemalloc_secure_getenv(const char *name) {
703 #ifdef JEMALLOC_HAVE_SECURE_GETENV
704 return secure_getenv(name);
705 #else
706 # ifdef JEMALLOC_HAVE_ISSETUGID
707 if (issetugid() != 0) {
708 return NULL;
709 }
710 # endif
711 return getenv(name);
712 #endif
713 }
714
715 static unsigned
716 malloc_ncpus(void) {
717 long result;
718
719 #ifdef _WIN32
720 SYSTEM_INFO si;
721 GetSystemInfo(&si);
722 result = si.dwNumberOfProcessors;
723 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
724 /*
725 * glibc >= 2.6 has the CPU_COUNT macro.
726 *
727 * glibc's sysconf() uses isspace(). glibc allocates for the first time
728 * *before* setting up the isspace tables. Therefore we need a
729 * different method to get the number of CPUs.
730 */
731 {
732 cpu_set_t set;
733
734 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
735 result = CPU_COUNT(&set);
736 }
737 #else
738 result = sysconf(_SC_NPROCESSORS_ONLN);
739 #endif
740 return ((result == -1) ? 1 : (unsigned)result);
741 }
742
743 static void
744 init_opt_stats_print_opts(const char *v, size_t vlen) {
745 size_t opts_len = strlen(opt_stats_print_opts);
746 assert(opts_len <= stats_print_tot_num_options);
747
748 for (size_t i = 0; i < vlen; i++) {
749 switch (v[i]) {
750 #define OPTION(o, v, d, s) case o: break;
751 STATS_PRINT_OPTIONS
752 #undef OPTION
753 default: continue;
754 }
755
756 if (strchr(opt_stats_print_opts, v[i]) != NULL) {
757 /* Ignore repeated. */
758 continue;
759 }
760
761 opt_stats_print_opts[opts_len++] = v[i];
762 opt_stats_print_opts[opts_len] = '\0';
763 assert(opts_len <= stats_print_tot_num_options);
764 }
765 assert(opts_len == strlen(opt_stats_print_opts));
766 }
767
768 static bool
769 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
770 char const **v_p, size_t *vlen_p) {
771 bool accept;
772 const char *opts = *opts_p;
773
774 *k_p = opts;
775
776 for (accept = false; !accept;) {
777 switch (*opts) {
778 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
779 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
780 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
781 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
782 case 'Y': case 'Z':
783 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
784 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
785 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
786 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
787 case 'y': case 'z':
788 case '0': case '1': case '2': case '3': case '4': case '5':
789 case '6': case '7': case '8': case '9':
790 case '_':
791 opts++;
792 break;
793 case ':':
794 opts++;
795 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
796 *v_p = opts;
797 accept = true;
798 break;
799 case '\0':
800 if (opts != *opts_p) {
801 malloc_write("<jemalloc>: Conf string ends "
802 "with key\n");
803 }
804 return true;
805 default:
806 malloc_write("<jemalloc>: Malformed conf string\n");
807 return true;
808 }
809 }
810
811 for (accept = false; !accept;) {
812 switch (*opts) {
813 case ',':
814 opts++;
815 /*
816 * Look ahead one character here, because the next time
817 * this function is called, it will assume that end of
818 * input has been cleanly reached if no input remains,
819 * but we have optimistically already consumed the
820 * comma if one exists.
821 */
822 if (*opts == '\0') {
823 malloc_write("<jemalloc>: Conf string ends "
824 "with comma\n");
825 }
826 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
827 accept = true;
828 break;
829 case '\0':
830 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
831 accept = true;
832 break;
833 default:
834 opts++;
835 break;
836 }
837 }
838
839 *opts_p = opts;
840 return false;
841 }
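/*
 * Parsing example (editorial, not part of the original source): for the
 * string "abort:true,narenas:4", successive calls to malloc_conf_next()
 * yield
 *     k = "abort",   klen = 5, v = "true", vlen = 4
 *     k = "narenas", klen = 7, v = "4",    vlen = 1
 * after which *opts_p points at the terminating '\0' and the while loop in
 * malloc_conf_init() stops.  A key that is never terminated by ':' or a
 * stray character yields the "Conf string ends with key" / "Malformed conf
 * string" diagnostics above and ends the scan (return true); a trailing
 * comma only triggers a warning.
 */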
842
843 static void
844 malloc_abort_invalid_conf(void) {
845 assert(opt_abort_conf);
846 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
847 "value (see above).\n");
848 abort();
849 }
850
851 static void
852 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
853 size_t vlen) {
854 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
855 (int)vlen, v);
856 /* If abort_conf is set, error out after processing all options. */
857 had_conf_error = true;
858 }
859
860 static void
861 malloc_slow_flag_init(void) {
862 /*
863 * Combine the runtime options into malloc_slow for fast path. Called
864 * after processing all the options.
865 */
866 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
867 | (opt_junk_free ? flag_opt_junk_free : 0)
868 | (opt_zero ? flag_opt_zero : 0)
869 | (opt_utrace ? flag_opt_utrace : 0)
870 | (opt_xmalloc ? flag_opt_xmalloc : 0);
871
872 malloc_slow = (malloc_slow_flags != 0);
873 }
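/*
 * Example (editorial note): with MALLOC_CONF="junk:true" on a config_fill
 * build, opt_junk_alloc and opt_junk_free are both true, so
 * malloc_slow_flags == (flag_opt_junk_alloc | flag_opt_junk_free) and
 * malloc_slow becomes true, which keeps subsequent allocations on the slow
 * paths that implement junk filling.
 */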
874
875 static void
876 malloc_conf_init(void) {
877 unsigned i;
878 char buf[PATH_MAX + 1];
879 const char *opts, *k, *v;
880 size_t klen, vlen;
881
882 for (i = 0; i < 4; i++) {
883 /* Get runtime configuration. */
884 switch (i) {
885 case 0:
886 opts = config_malloc_conf;
887 break;
888 case 1:
889 if (je_malloc_conf != NULL) {
890 /*
891 * Use options that were compiled into the
892 * program.
893 */
894 opts = je_malloc_conf;
895 } else {
896 /* No configuration specified. */
897 buf[0] = '\0';
898 opts = buf;
899 }
900 break;
901 case 2: {
902 ssize_t linklen = 0;
903 #ifndef _WIN32
904 int saved_errno = errno;
905 const char *linkname =
906 # ifdef JEMALLOC_PREFIX
907 "/etc/"JEMALLOC_PREFIX"malloc.conf"
908 # else
909 "/etc/malloc.conf"
910 # endif
911 ;
912
913 /*
914 * Try to use the contents of the "/etc/malloc.conf"
915 * symbolic link's name.
916 */
917 linklen = readlink(linkname, buf, sizeof(buf) - 1);
918 if (linklen == -1) {
919 /* No configuration specified. */
920 linklen = 0;
921 /* Restore errno. */
922 set_errno(saved_errno);
923 }
924 #endif
925 buf[linklen] = '\0';
926 opts = buf;
927 break;
928 } case 3: {
929 const char *envname =
930 #ifdef JEMALLOC_PREFIX
931 JEMALLOC_CPREFIX"MALLOC_CONF"
932 #else
933 "MALLOC_CONF"
934 #endif
935 ;
936
937 if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
938 /*
939 * Do nothing; opts is already initialized to
940 * the value of the MALLOC_CONF environment
941 * variable.
942 */
943 } else {
944 /* No configuration specified. */
945 buf[0] = '\0';
946 opts = buf;
947 }
948 break;
949 } default:
950 not_reached();
951 buf[0] = '\0';
952 opts = buf;
953 }
954
955 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
956 &vlen)) {
957 #define CONF_MATCH(n) \
958 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
959 #define CONF_MATCH_VALUE(n) \
960 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
961 #define CONF_HANDLE_BOOL(o, n) \
962 if (CONF_MATCH(n)) { \
963 if (CONF_MATCH_VALUE("true")) { \
964 o = true; \
965 } else if (CONF_MATCH_VALUE("false")) { \
966 o = false; \
967 } else { \
968 malloc_conf_error( \
969 "Invalid conf value", \
970 k, klen, v, vlen); \
971 } \
972 continue; \
973 }
974 #define CONF_MIN_no(um, min) false
975 #define CONF_MIN_yes(um, min) ((um) < (min))
976 #define CONF_MAX_no(um, max) false
977 #define CONF_MAX_yes(um, max) ((um) > (max))
978 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
979 if (CONF_MATCH(n)) { \
980 uintmax_t um; \
981 char *end; \
982 \
983 set_errno(0); \
984 um = malloc_strtoumax(v, &end, 0); \
985 if (get_errno() != 0 || (uintptr_t)end -\
986 (uintptr_t)v != vlen) { \
987 malloc_conf_error( \
988 "Invalid conf value", \
989 k, klen, v, vlen); \
990 } else if (clip) { \
991 if (CONF_MIN_##check_min(um, \
992 (t)(min))) { \
993 o = (t)(min); \
994 } else if ( \
995 CONF_MAX_##check_max(um, \
996 (t)(max))) { \
997 o = (t)(max); \
998 } else { \
999 o = (t)um; \
1000 } \
1001 } else { \
1002 if (CONF_MIN_##check_min(um, \
1003 (t)(min)) || \
1004 CONF_MAX_##check_max(um, \
1005 (t)(max))) { \
1006 malloc_conf_error( \
1007 "Out-of-range " \
1008 "conf value", \
1009 k, klen, v, vlen); \
1010 } else { \
1011 o = (t)um; \
1012 } \
1013 } \
1014 continue; \
1015 }
1016 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1017 clip) \
1018 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1019 check_min, check_max, clip)
1020 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1021 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1022 check_min, check_max, clip)
1023 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1024 if (CONF_MATCH(n)) { \
1025 long l; \
1026 char *end; \
1027 \
1028 set_errno(0); \
1029 l = strtol(v, &end, 0); \
1030 if (get_errno() != 0 || (uintptr_t)end -\
1031 (uintptr_t)v != vlen) { \
1032 malloc_conf_error( \
1033 "Invalid conf value", \
1034 k, klen, v, vlen); \
1035 } else if (l < (ssize_t)(min) || l > \
1036 (ssize_t)(max)) { \
1037 malloc_conf_error( \
1038 "Out-of-range conf value", \
1039 k, klen, v, vlen); \
1040 } else { \
1041 o = l; \
1042 } \
1043 continue; \
1044 }
1045 #define CONF_HANDLE_CHAR_P(o, n, d) \
1046 if (CONF_MATCH(n)) { \
1047 size_t cpylen = (vlen <= \
1048 sizeof(o)-1) ? vlen : \
1049 sizeof(o)-1; \
1050 strncpy(o, v, cpylen); \
1051 o[cpylen] = '\0'; \
1052 continue; \
1053 }
1054
1055 CONF_HANDLE_BOOL(opt_abort, "abort")
1056 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1057 if (strncmp("metadata_thp", k, klen) == 0) {
1058 int i;
1059 bool match = false;
1060 for (i = 0; i < metadata_thp_mode_limit; i++) {
1061 if (strncmp(metadata_thp_mode_names[i],
1062 v, vlen) == 0) {
1063 opt_metadata_thp = i;
1064 match = true;
1065 break;
1066 }
1067 }
1068 if (!match) {
1069 malloc_conf_error("Invalid conf value",
1070 k, klen, v, vlen);
1071 }
1072 continue;
1073 }
1074 CONF_HANDLE_BOOL(opt_retain, "retain")
1075 if (strncmp("dss", k, klen) == 0) {
1076 int i;
1077 bool match = false;
1078 for (i = 0; i < dss_prec_limit; i++) {
1079 if (strncmp(dss_prec_names[i], v, vlen)
1080 == 0) {
1081 if (extent_dss_prec_set(i)) {
1082 malloc_conf_error(
1083 "Error setting dss",
1084 k, klen, v, vlen);
1085 } else {
1086 opt_dss =
1087 dss_prec_names[i];
1088 match = true;
1089 break;
1090 }
1091 }
1092 }
1093 if (!match) {
1094 malloc_conf_error("Invalid conf value",
1095 k, klen, v, vlen);
1096 }
1097 continue;
1098 }
1099 CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1100 UINT_MAX, yes, no, false)
1101 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1102 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1103 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1104 SSIZE_MAX);
1105 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1106 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1107 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1108 SSIZE_MAX);
1109 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1110 if (CONF_MATCH("stats_print_opts")) {
1111 init_opt_stats_print_opts(v, vlen);
1112 continue;
1113 }
1114 if (config_fill) {
1115 if (CONF_MATCH("junk")) {
1116 if (CONF_MATCH_VALUE("true")) {
1117 opt_junk = "true";
1118 opt_junk_alloc = opt_junk_free =
1119 true;
1120 } else if (CONF_MATCH_VALUE("false")) {
1121 opt_junk = "false";
1122 opt_junk_alloc = opt_junk_free =
1123 false;
1124 } else if (CONF_MATCH_VALUE("alloc")) {
1125 opt_junk = "alloc";
1126 opt_junk_alloc = true;
1127 opt_junk_free = false;
1128 } else if (CONF_MATCH_VALUE("free")) {
1129 opt_junk = "free";
1130 opt_junk_alloc = false;
1131 opt_junk_free = true;
1132 } else {
1133 malloc_conf_error(
1134 "Invalid conf value", k,
1135 klen, v, vlen);
1136 }
1137 continue;
1138 }
1139 CONF_HANDLE_BOOL(opt_zero, "zero")
1140 }
1141 if (config_utrace) {
1142 CONF_HANDLE_BOOL(opt_utrace, "utrace")
1143 }
1144 if (config_xmalloc) {
1145 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1146 }
1147 CONF_HANDLE_BOOL(opt_tcache, "tcache")
1148 CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1149 "lg_extent_max_active_fit", 0,
1150 (sizeof(size_t) << 3), yes, yes, false)
1151 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1152 -1, (sizeof(size_t) << 3) - 1)
1153 if (strncmp("percpu_arena", k, klen) == 0) {
1154 bool match = false;
1155 for (int i = percpu_arena_mode_names_base; i <
1156 percpu_arena_mode_names_limit; i++) {
1157 if (strncmp(percpu_arena_mode_names[i],
1158 v, vlen) == 0) {
1159 if (!have_percpu_arena) {
1160 malloc_conf_error(
1161 "No getcpu support",
1162 k, klen, v, vlen);
1163 }
1164 opt_percpu_arena = i;
1165 match = true;
1166 break;
1167 }
1168 }
1169 if (!match) {
1170 malloc_conf_error("Invalid conf value",
1171 k, klen, v, vlen);
1172 }
1173 continue;
1174 }
1175 CONF_HANDLE_BOOL(opt_background_thread,
1176 "background_thread");
1177 CONF_HANDLE_SIZE_T(opt_max_background_threads,
1178 "max_background_threads", 1,
1179 opt_max_background_threads, yes, yes,
1180 true);
1181 if (config_prof) {
1182 CONF_HANDLE_BOOL(opt_prof, "prof")
1183 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1184 "prof_prefix", "jeprof")
1185 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1186 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1187 "prof_thread_active_init")
1188 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1189 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1190 - 1, no, yes, true)
1191 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1192 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1193 "lg_prof_interval", -1,
1194 (sizeof(uint64_t) << 3) - 1)
1195 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1196 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1197 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1198 }
1199 if (config_log) {
1200 if (CONF_MATCH("log")) {
1201 size_t cpylen = (
1202 vlen <= sizeof(log_var_names) ?
1203 vlen : sizeof(log_var_names) - 1);
1204 strncpy(log_var_names, v, cpylen);
1205 log_var_names[cpylen] = '\0';
1206 continue;
1207 }
1208 }
1209 if (CONF_MATCH("thp")) {
1210 bool match = false;
1211 for (int i = 0; i < thp_mode_names_limit; i++) {
1212 if (strncmp(thp_mode_names[i],v, vlen)
1213 == 0) {
1214 if (!have_madvise_huge) {
1215 malloc_conf_error(
1216 "No THP support",
1217 k, klen, v, vlen);
1218 }
1219 opt_thp = i;
1220 match = true;
1221 break;
1222 }
1223 }
1224 if (!match) {
1225 malloc_conf_error("Invalid conf value",
1226 k, klen, v, vlen);
1227 }
1228 continue;
1229 }
1230 malloc_conf_error("Invalid conf pair", k, klen, v,
1231 vlen);
1232 #undef CONF_MATCH
1233 #undef CONF_MATCH_VALUE
1234 #undef CONF_HANDLE_BOOL
1235 #undef CONF_MIN_no
1236 #undef CONF_MIN_yes
1237 #undef CONF_MAX_no
1238 #undef CONF_MAX_yes
1239 #undef CONF_HANDLE_T_U
1240 #undef CONF_HANDLE_UNSIGNED
1241 #undef CONF_HANDLE_SIZE_T
1242 #undef CONF_HANDLE_SSIZE_T
1243 #undef CONF_HANDLE_CHAR_P
1244 }
1245 if (opt_abort_conf && had_conf_error) {
1246 malloc_abort_invalid_conf();
1247 }
1248 }
1249 atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1250 }
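/*
 * Editorial summary of the loop above: the four configuration sources are
 * processed in a fixed order: the compile-time --with-malloc-conf string
 * (config_malloc_conf), then the application-provided je_malloc_conf
 * symbol, then the /etc/malloc.conf symlink target, and finally the
 * MALLOC_CONF environment variable.  For conflicting keys, later sources
 * win simply because they are parsed last.
 */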
1251
1252 static bool
1253 malloc_init_hard_needed(void) {
1254 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1255 malloc_init_recursible)) {
1256 /*
1257 * Another thread initialized the allocator before this one
1258 * acquired init_lock, or this thread is the initializing
1259 * thread, and it is recursively allocating.
1260 */
1261 return false;
1262 }
1263 #ifdef JEMALLOC_THREADED_INIT
1264 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1265 /* Busy-wait until the initializing thread completes. */
1266 spin_t spinner = SPIN_INITIALIZER;
1267 do {
1268 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1269 spin_adaptive(&spinner);
1270 malloc_mutex_lock(TSDN_NULL, &init_lock);
1271 } while (!malloc_initialized());
1272 return false;
1273 }
1274 #endif
1275 return true;
1276 }
1277
1278 static bool
1279 malloc_init_hard_a0_locked() {
1280 malloc_initializer = INITIALIZER;
1281
1282 if (config_prof) {
1283 prof_boot0();
1284 }
1285 malloc_conf_init();
1286 if (opt_stats_print) {
1287 /* Print statistics at exit. */
1288 if (atexit(stats_print_atexit) != 0) {
1289 malloc_write("<jemalloc>: Error in atexit()\n");
1290 if (opt_abort) {
1291 abort();
1292 }
1293 }
1294 }
1295 if (pages_boot()) {
1296 return true;
1297 }
1298 if (base_boot(TSDN_NULL)) {
1299 return true;
1300 }
1301 if (extent_boot()) {
1302 return true;
1303 }
1304 if (ctl_boot()) {
1305 return true;
1306 }
1307 if (config_prof) {
1308 prof_boot1();
1309 }
1310 arena_boot();
1311 if (tcache_boot(TSDN_NULL)) {
1312 return true;
1313 }
1314 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1315 malloc_mutex_rank_exclusive)) {
1316 return true;
1317 }
1318 /*
1319 * Create enough scaffolding to allow recursive allocation in
1320 * malloc_ncpus().
1321 */
1322 narenas_auto = 1;
1323 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1324 /*
1325 * Initialize one arena here. The rest are lazily created in
1326 * arena_choose_hard().
1327 */
1328 if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1329 == NULL) {
1330 return true;
1331 }
1332 a0 = arena_get(TSDN_NULL, 0, false);
1333 malloc_init_state = malloc_init_a0_initialized;
1334
1335 return false;
1336 }
1337
1338 static bool
1339 malloc_init_hard_a0(void) {
1340 bool ret;
1341
1342 malloc_mutex_lock(TSDN_NULL, &init_lock);
1343 ret = malloc_init_hard_a0_locked();
1344 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1345 return ret;
1346 }
1347
1348 /* Initialize data structures which may trigger recursive allocation. */
1349 static bool
1350 malloc_init_hard_recursible(void) {
1351 malloc_init_state = malloc_init_recursible;
1352
1353 ncpus = malloc_ncpus();
1354
1355 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1356 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1357 !defined(__native_client__))
1358 /* LinuxThreads' pthread_atfork() allocates. */
1359 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1360 jemalloc_postfork_child) != 0) {
1361 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1362 if (opt_abort) {
1363 abort();
1364 }
1365 return true;
1366 }
1367 #endif
1368
1369 if (background_thread_boot0()) {
1370 return true;
1371 }
1372
1373 return false;
1374 }
1375
1376 static unsigned
1377 malloc_narenas_default(void) {
1378 assert(ncpus > 0);
1379 /*
1380 * For SMP systems, create more than one arena per CPU by
1381 * default.
1382 */
1383 if (ncpus > 1) {
1384 return ncpus << 2;
1385 } else {
1386 return 1;
1387 }
1388 }
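/*
 * Example (editorial note): on a 16-CPU machine with opt_narenas left at 0
 * and percpu_arena disabled, this yields 16 << 2 == 64 automatic arenas; a
 * uniprocessor system gets a single arena.
 */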
1389
1390 static percpu_arena_mode_t
1391 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1392 assert(!malloc_initialized());
1393 assert(mode <= percpu_arena_disabled);
1394
1395 if (mode != percpu_arena_disabled) {
1396 mode += percpu_arena_mode_enabled_base;
1397 }
1398
1399 return mode;
1400 }
1401
1402 static bool
1403 malloc_init_narenas(void) {
1404 assert(ncpus > 0);
1405
1406 if (opt_percpu_arena != percpu_arena_disabled) {
1407 if (!have_percpu_arena || malloc_getcpu() < 0) {
1408 opt_percpu_arena = percpu_arena_disabled;
1409 malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1410 "available. Setting narenas to %u.\n", opt_narenas ?
1411 opt_narenas : malloc_narenas_default());
1412 if (opt_abort) {
1413 abort();
1414 }
1415 } else {
1416 if (ncpus >= MALLOCX_ARENA_LIMIT) {
1417 malloc_printf("<jemalloc>: narenas w/ percpu "
1418 "arena beyond limit (%d)\n", ncpus);
1419 if (opt_abort) {
1420 abort();
1421 }
1422 return true;
1423 }
1424 /* NB: opt_percpu_arena isn't fully initialized yet. */
1425 if (percpu_arena_as_initialized(opt_percpu_arena) ==
1426 per_phycpu_arena && ncpus % 2 != 0) {
1427 malloc_printf("<jemalloc>: invalid "
1428 "configuration -- per physical CPU arena "
1429 "with odd number (%u) of CPUs (no hyper "
1430 "threading?).\n", ncpus);
1431 if (opt_abort)
1432 abort();
1433 }
1434 unsigned n = percpu_arena_ind_limit(
1435 percpu_arena_as_initialized(opt_percpu_arena));
1436 if (opt_narenas < n) {
1437 /*
1438 * If narenas is specified with percpu_arena
1439 * enabled, actual narenas is set as the greater
1440 * of the two. percpu_arena_choose will be free
1441 * to use any of the arenas based on CPU
1442 * id. This is conservative (at a small cost)
1443 * but ensures correctness.
1444 *
1445 * If for some reason the ncpus determined at
1446 * boot is not the actual number (e.g. because
1447 * of affinity setting from numactl), reserving
1448 * narenas this way provides a workaround for
1449 * percpu_arena.
1450 */
1451 opt_narenas = n;
1452 }
1453 }
1454 }
1455 if (opt_narenas == 0) {
1456 opt_narenas = malloc_narenas_default();
1457 }
1458 assert(opt_narenas > 0);
1459
1460 narenas_auto = opt_narenas;
1461 /*
1462 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1463 */
1464 if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1465 narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1466 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1467 narenas_auto);
1468 }
1469 narenas_total_set(narenas_auto);
1470
1471 return false;
1472 }
1473
1474 static void
1475 malloc_init_percpu(void) {
1476 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1477 }
1478
1479 static bool
1480 malloc_init_hard_finish(void) {
1481 if (malloc_mutex_boot()) {
1482 return true;
1483 }
1484
1485 malloc_init_state = malloc_init_initialized;
1486 malloc_slow_flag_init();
1487
1488 return false;
1489 }
1490
1491 static void
1492 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1493 malloc_mutex_assert_owner(tsdn, &init_lock);
1494 malloc_mutex_unlock(tsdn, &init_lock);
1495 if (reentrancy_set) {
1496 assert(!tsdn_null(tsdn));
1497 tsd_t *tsd = tsdn_tsd(tsdn);
1498 assert(tsd_reentrancy_level_get(tsd) > 0);
1499 post_reentrancy(tsd);
1500 }
1501 }
1502
1503 static bool
1504 malloc_init_hard(void) {
1505 tsd_t *tsd;
1506
1507 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1508 _init_init_lock();
1509 #endif
1510 malloc_mutex_lock(TSDN_NULL, &init_lock);
1511
1512 #define UNLOCK_RETURN(tsdn, ret, reentrancy) \
1513 malloc_init_hard_cleanup(tsdn, reentrancy); \
1514 return ret;
1515
1516 if (!malloc_init_hard_needed()) {
1517 UNLOCK_RETURN(TSDN_NULL, false, false)
1518 }
1519
1520 if (malloc_init_state != malloc_init_a0_initialized &&
1521 malloc_init_hard_a0_locked()) {
1522 UNLOCK_RETURN(TSDN_NULL, true, false)
1523 }
1524
1525 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1526 /* Recursive allocation relies on functional tsd. */
1527 tsd = malloc_tsd_boot0();
1528 if (tsd == NULL) {
1529 return true;
1530 }
1531 if (malloc_init_hard_recursible()) {
1532 return true;
1533 }
1534
1535 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1536 /* Set reentrancy level to 1 during init. */
1537 pre_reentrancy(tsd, NULL);
1538 /* Initialize narenas before prof_boot2 (for allocation). */
1539 if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1540 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1541 }
1542 if (config_prof && prof_boot2(tsd)) {
1543 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1544 }
1545
1546 malloc_init_percpu();
1547
1548 if (malloc_init_hard_finish()) {
1549 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1550 }
1551 post_reentrancy(tsd);
1552 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1553
1554 witness_assert_lockless(witness_tsd_tsdn(
1555 tsd_witness_tsdp_get_unsafe(tsd)));
1556 malloc_tsd_boot1();
1557 /* Update TSD after tsd_boot1. */
1558 tsd = tsd_fetch();
1559 if (opt_background_thread) {
1560 assert(have_background_thread);
1561 /*
1562 * Need to finish init & unlock first before creating background
1563 * threads (pthread_create depends on malloc). ctl_init (which
1564 * sets isthreaded) needs to be called without holding any lock.
1565 */
1566 background_thread_ctl_init(tsd_tsdn(tsd));
1567
1568 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1569 bool err = background_thread_create(tsd, 0);
1570 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1571 if (err) {
1572 return true;
1573 }
1574 }
1575 #undef UNLOCK_RETURN
1576 return false;
1577 }
1578
1579 /*
1580 * End initialization functions.
1581 */
1582 /******************************************************************************/
1583 /*
1584 * Begin allocation-path internal functions and data structures.
1585 */
1586
1587 /*
1588 * Settings determined by the documented behavior of the allocation functions.
1589 */
1590 typedef struct static_opts_s static_opts_t;
1591 struct static_opts_s {
1592 /* Whether or not allocation size may overflow. */
1593 bool may_overflow;
1594 /* Whether or not allocations of size 0 should be treated as size 1. */
1595 bool bump_empty_alloc;
1596 /*
1597 * Whether to assert that allocations are not of size 0 (after any
1598 * bumping).
1599 */
1600 bool assert_nonempty_alloc;
1601
1602 /*
1603 * Whether or not to modify the 'result' argument to malloc in case of
1604 * error.
1605 */
1606 bool null_out_result_on_error;
1607 /* Whether to set errno when we encounter an error condition. */
1608 bool set_errno_on_error;
1609
1610 /*
1611 * The minimum valid alignment for functions requesting aligned storage.
1612 */
1613 size_t min_alignment;
1614
1615 /* The error string to use if we oom. */
1616 const char *oom_string;
1617 /* The error string to use if the passed-in alignment is invalid. */
1618 const char *invalid_alignment_string;
1619
1620 /*
1621 * False if we're configured to skip some time-consuming operations.
1622 *
1623 * This isn't really a malloc "behavior", but it acts as a useful
1624 * summary of several other static (or at least, static after program
1625 * initialization) options.
1626 */
1627 bool slow;
1628 };
1629
1630 JEMALLOC_ALWAYS_INLINE void
1631 static_opts_init(static_opts_t *static_opts) {
1632 static_opts->may_overflow = false;
1633 static_opts->bump_empty_alloc = false;
1634 static_opts->assert_nonempty_alloc = false;
1635 static_opts->null_out_result_on_error = false;
1636 static_opts->set_errno_on_error = false;
1637 static_opts->min_alignment = 0;
1638 static_opts->oom_string = "";
1639 static_opts->invalid_alignment_string = "";
1640 static_opts->slow = false;
1641 }
1642
1643 /*
1644 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
1645 * should have one constant here per magic value there. Note however that the
1646 * representations need not be related.
1647 */
1648 #define TCACHE_IND_NONE ((unsigned)-1)
1649 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1650 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
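/*
 * Editorial note: these internal sentinels mirror the public flag macros.
 * For example, a mallocx(size, MALLOCX_TCACHE_NONE) call is translated by
 * the mallocx()/rallocx() wrappers later in this file (outside this
 * excerpt) into dopts->tcache_ind = TCACHE_IND_NONE, while a plain
 * malloc() leaves dopts at the dynamic_opts_init() defaults
 * (TCACHE_IND_AUTOMATIC / ARENA_IND_AUTOMATIC).
 */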
1651
1652 typedef struct dynamic_opts_s dynamic_opts_t;
1653 struct dynamic_opts_s {
1654 void **result;
1655 size_t num_items;
1656 size_t item_size;
1657 size_t alignment;
1658 bool zero;
1659 unsigned tcache_ind;
1660 unsigned arena_ind;
1661 };
1662
1663 JEMALLOC_ALWAYS_INLINE void
1664 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1665 dynamic_opts->result = NULL;
1666 dynamic_opts->num_items = 0;
1667 dynamic_opts->item_size = 0;
1668 dynamic_opts->alignment = 0;
1669 dynamic_opts->zero = false;
1670 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1671 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1672 }
1673
1674 /* ind is ignored if dopts->alignment > 0. */
1675 JEMALLOC_ALWAYS_INLINE void *
1676 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1677 size_t size, size_t usize, szind_t ind) {
1678 tcache_t *tcache;
1679 arena_t *arena;
1680
1681 /* Fill in the tcache. */
1682 if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1683 if (likely(!sopts->slow)) {
1684 /* Getting tcache ptr unconditionally. */
1685 tcache = tsd_tcachep_get(tsd);
1686 assert(tcache == tcache_get(tsd));
1687 } else {
1688 tcache = tcache_get(tsd);
1689 }
1690 } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1691 tcache = NULL;
1692 } else {
1693 tcache = tcaches_get(tsd, dopts->tcache_ind);
1694 }
1695
1696 /* Fill in the arena. */
1697 if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1698 /*
1699 * In case of automatic arena management, we defer arena
1700 * computation until as late as we can, hoping to fill the
1701 * allocation out of the tcache.
1702 */
1703 arena = NULL;
1704 } else {
1705 arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1706 }
1707
1708 if (unlikely(dopts->alignment != 0)) {
1709 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1710 dopts->zero, tcache, arena);
1711 }
1712
1713 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1714 arena, sopts->slow);
1715 }
1716
1717 JEMALLOC_ALWAYS_INLINE void *
1718 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1719 size_t usize, szind_t ind) {
1720 void *ret;
1721
1722 /*
1723 * For small allocations, sampling bumps the usize. If so, we allocate
1724 * from the ind_large bucket.
1725 */
1726 szind_t ind_large;
1727 size_t bumped_usize = usize;
1728
1729 if (usize <= SMALL_MAXCLASS) {
1730 assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1731 sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1732 == LARGE_MINCLASS);
1733 ind_large = sz_size2index(LARGE_MINCLASS);
1734 bumped_usize = sz_s2u(LARGE_MINCLASS);
1735 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1736 bumped_usize, ind_large);
1737 if (unlikely(ret == NULL)) {
1738 return NULL;
1739 }
1740 arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1741 } else {
1742 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1743 }
1744
1745 return ret;
1746 }
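/*
 * Example (editorial; sizes assume a typical 4 KiB-page build): a sampled
 * 4096-byte request is bumped to LARGE_MINCLASS so that it lands in a
 * large extent capable of carrying its own prof_tctx, and
 * arena_prof_promote() then records the original usize (4096) so that
 * isalloc() and the stats still report the size the application actually
 * requested.
 */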
1747
1748 /*
1749 * Returns true if the allocation will overflow, and false otherwise. Sets
1750 * *size to the product either way.
1751 */
1752 JEMALLOC_ALWAYS_INLINE bool
1753 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1754 size_t *size) {
1755 /*
1756 * This function is just num_items * item_size, except that we may have
1757 * to check for overflow.
1758 */
1759
1760 if (!may_overflow) {
1761 assert(dopts->num_items == 1);
1762 *size = dopts->item_size;
1763 return false;
1764 }
1765
1766 /* A size_t with its high-half bits all set to 1. */
1767 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1768
1769 *size = dopts->item_size * dopts->num_items;
1770
1771 if (unlikely(*size == 0)) {
1772 return (dopts->num_items != 0 && dopts->item_size != 0);
1773 }
1774
1775 /*
1776 * We got a non-zero size, but we don't know if we overflowed to get
1777 * there. To avoid having to do a divide, we'll be clever and note that
1778 * if both A and B can be represented in N/2 bits, then their product
1779 * can be represented in N bits (without the possibility of overflow).
1780 */
1781 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1782 return false;
1783 }
1784 if (likely(*size / dopts->item_size == dopts->num_items)) {
1785 return false;
1786 }
1787 return true;
1788 }
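/*
 * Worked example (editorial, assuming 64-bit size_t): high_bits is
 * 0xffffffff00000000.  For calloc(0x100000001, 0x100000001) the product
 * wraps to 0x200000001; both operands have bits set in the high half, so
 * the cheap mask test cannot rule out overflow, and the division check
 * (0x200000001 / 0x100000001 == 1, not 0x100000001) reports it.  When both
 * operands fit in 32 bits, the mask test alone proves the product cannot
 * wrap and the division is skipped.
 */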
1789
1790 JEMALLOC_ALWAYS_INLINE int
1791 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1792 /* Where the actual allocated memory will live. */
1793 void *allocation = NULL;
1794 /* Filled in by compute_size_with_overflow below. */
1795 size_t size = 0;
1796 /*
1797 * For unaligned allocations, we need only ind. For aligned
1798 * allocations, or in case of stats or profiling we need usize.
1799 *
1800 * These are actually dead stores, in that their values are reset before
1801 * any branch on their value is taken. Sometimes though, it's
1802 * convenient to pass them as arguments before this point. To avoid
1803 * undefined behavior then, we initialize them with dummy stores.
1804 */
1805 szind_t ind = 0;
1806 size_t usize = 0;
1807
1808 /* Reentrancy is only checked on slow path. */
1809 int8_t reentrancy_level;
1810
1811 /* Compute the amount of memory the user wants. */
1812 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1813 &size))) {
1814 goto label_oom;
1815 }
1816
1817 /* Validate the user input. */
1818 if (sopts->bump_empty_alloc) {
1819 if (unlikely(size == 0)) {
1820 size = 1;
1821 }
1822 }
1823
1824 if (sopts->assert_nonempty_alloc) {
1825 assert (size != 0);
1826 }
1827
1828 if (unlikely(dopts->alignment < sopts->min_alignment
1829 || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1830 goto label_invalid_alignment;
1831 }
1832
1833 /* This is the beginning of the "core" algorithm. */
1834
1835 if (dopts->alignment == 0) {
1836 ind = sz_size2index(size);
1837 if (unlikely(ind >= NSIZES)) {
1838 goto label_oom;
1839 }
1840 if (config_stats || (config_prof && opt_prof)) {
1841 usize = sz_index2size(ind);
1842 assert(usize > 0 && usize <= LARGE_MAXCLASS);
1843 }
1844 } else {
1845 usize = sz_sa2u(size, dopts->alignment);
1846 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1847 goto label_oom;
1848 }
1849 }
1850
1851 check_entry_exit_locking(tsd_tsdn(tsd));
1852
1853 /*
1854 * If we need to handle reentrancy, we can do it out of a
1855 * known-initialized arena (i.e. arena 0).
1856 */
1857 reentrancy_level = tsd_reentrancy_level_get(tsd);
1858 if (sopts->slow && unlikely(reentrancy_level > 0)) {
1859 /*
1860 * We should never specify particular arenas or tcaches from
1861 * within our internal allocations.
1862 */
1863 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1864 dopts->tcache_ind == TCACHE_IND_NONE);
1865 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1866 dopts->tcache_ind = TCACHE_IND_NONE;
1867 /* We know that arena 0 has already been initialized. */
1868 dopts->arena_ind = 0;
1869 }
1870
1871 /* If profiling is on, get our profiling context. */
1872 if (config_prof && opt_prof) {
1873 /*
1874 * Note that if we're going down this path, usize must have been
1875 * initialized in the previous if statement.
1876 */
1877 prof_tctx_t *tctx = prof_alloc_prep(
1878 tsd, usize, prof_active_get_unlocked(), true);
1879
1880 alloc_ctx_t alloc_ctx;
1881 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1882 alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1883 allocation = imalloc_no_sample(
1884 sopts, dopts, tsd, usize, usize, ind);
1885 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
1886 /*
1887 * Note that ind might still be 0 here. This is fine;
1888 * imalloc_sample ignores ind if dopts->alignment > 0.
1889 */
1890 allocation = imalloc_sample(
1891 sopts, dopts, tsd, usize, ind);
1892 alloc_ctx.slab = false;
1893 } else {
1894 allocation = NULL;
1895 }
1896
1897 if (unlikely(allocation == NULL)) {
1898 prof_alloc_rollback(tsd, tctx, true);
1899 goto label_oom;
1900 }
1901 prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1902 } else {
1903 /*
1904 * If dopts->alignment > 0, then ind is still 0, but usize was
1905 * computed in the previous if statement. Down the positive
1906 * alignment path, imalloc_no_sample ignores ind and size
1907 * (relying only on usize).
1908 */
1909 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1910 ind);
1911 if (unlikely(allocation == NULL)) {
1912 goto label_oom;
1913 }
1914 }
1915
1916 /*
1917 * Allocation has been done at this point. We still have some
1918 * post-allocation work to do though.
1919 */
1920 assert(dopts->alignment == 0
1921 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1922
1923 if (config_stats) {
1924 assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1925 *tsd_thread_allocatedp_get(tsd) += usize;
1926 }
1927
1928 if (sopts->slow) {
1929 UTRACE(0, size, allocation);
1930 }
1931
1932 /* Success! */
1933 check_entry_exit_locking(tsd_tsdn(tsd));
1934 *dopts->result = allocation;
1935 return 0;
1936
1937 label_oom:
1938 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1939 malloc_write(sopts->oom_string);
1940 abort();
1941 }
1942
1943 if (sopts->slow) {
1944 UTRACE(NULL, size, NULL);
1945 }
1946
1947 check_entry_exit_locking(tsd_tsdn(tsd));
1948
1949 if (sopts->set_errno_on_error) {
1950 set_errno(ENOMEM);
1951 }
1952
1953 if (sopts->null_out_result_on_error) {
1954 *dopts->result = NULL;
1955 }
1956
1957 return ENOMEM;
1958
1959 /*
1960 * This label is only jumped to by one goto; we move it out of line
1961 	 * anyway to avoid obscuring the non-error paths, and for symmetry with
1962 * the oom case.
1963 */
1964 label_invalid_alignment:
1965 if (config_xmalloc && unlikely(opt_xmalloc)) {
1966 malloc_write(sopts->invalid_alignment_string);
1967 abort();
1968 }
1969
1970 if (sopts->set_errno_on_error) {
1971 set_errno(EINVAL);
1972 }
1973
1974 if (sopts->slow) {
1975 UTRACE(NULL, size, NULL);
1976 }
1977
1978 check_entry_exit_locking(tsd_tsdn(tsd));
1979
1980 if (sopts->null_out_result_on_error) {
1981 *dopts->result = NULL;
1982 }
1983
1984 return EINVAL;
1985 }
1986
1987 /* Returns the errno-style error code of the allocation. */
1988 JEMALLOC_ALWAYS_INLINE int
1989 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
1990 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
1991 if (config_xmalloc && unlikely(opt_xmalloc)) {
1992 malloc_write(sopts->oom_string);
1993 abort();
1994 }
1995 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
1996 set_errno(ENOMEM);
1997 *dopts->result = NULL;
1998
1999 return ENOMEM;
2000 }
2001
2002 /* We always need the tsd. Let's grab it right away. */
2003 tsd_t *tsd = tsd_fetch();
2004 assert(tsd);
2005 if (likely(tsd_fast(tsd))) {
2006 /* Fast and common path. */
2007 tsd_assert_fast(tsd);
2008 sopts->slow = false;
2009 return imalloc_body(sopts, dopts, tsd);
2010 } else {
2011 sopts->slow = true;
2012 return imalloc_body(sopts, dopts, tsd);
2013 }
2014 }
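
/*
 * Each public entry point below follows the same pattern: fill in a
 * static_opts_t with per-entry-point policy (error strings, minimum
 * alignment, whether errno is set and the result nulled out on failure) and
 * a dynamic_opts_t with the per-call request (item count, item size,
 * alignment, zero/tcache/arena selection), then hand both to imalloc().
 */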
2015 /******************************************************************************/
2016 /*
2017 * Begin malloc(3)-compatible functions.
2018 */
2019
2020 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2021 void JEMALLOC_NOTHROW *
2022 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2023 je_malloc(size_t size) {
2024 void *ret;
2025 static_opts_t sopts;
2026 dynamic_opts_t dopts;
2027
2028 LOG("core.malloc.entry", "size: %zu", size);
2029
2030 static_opts_init(&sopts);
2031 dynamic_opts_init(&dopts);
2032
2033 sopts.bump_empty_alloc = true;
2034 sopts.null_out_result_on_error = true;
2035 sopts.set_errno_on_error = true;
2036 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2037
2038 dopts.result = &ret;
2039 dopts.num_items = 1;
2040 dopts.item_size = size;
2041
2042 imalloc(&sopts, &dopts);
2043
2044 LOG("core.malloc.exit", "result: %p", ret);
2045
2046 return ret;
2047 }
2048
2049 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2050 JEMALLOC_ATTR(nonnull(1))
2051 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2052 int ret;
2053 static_opts_t sopts;
2054 dynamic_opts_t dopts;
2055
2056 LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2057 "size: %zu", memptr, alignment, size);
2058
2059 static_opts_init(&sopts);
2060 dynamic_opts_init(&dopts);
2061
2062 sopts.bump_empty_alloc = true;
2063 sopts.min_alignment = sizeof(void *);
2064 sopts.oom_string =
2065 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2066 sopts.invalid_alignment_string =
2067 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2068
2069 dopts.result = memptr;
2070 dopts.num_items = 1;
2071 dopts.item_size = size;
2072 dopts.alignment = alignment;
2073
2074 ret = imalloc(&sopts, &dopts);
2075
2076 LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2077 *memptr);
2078
2079 return ret;
2080 }
2081
2082 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2083 void JEMALLOC_NOTHROW *
2084 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2085 je_aligned_alloc(size_t alignment, size_t size) {
2086 void *ret;
2087
2088 static_opts_t sopts;
2089 dynamic_opts_t dopts;
2090
2091 LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2092 alignment, size);
2093
2094 static_opts_init(&sopts);
2095 dynamic_opts_init(&dopts);
2096
2097 sopts.bump_empty_alloc = true;
2098 sopts.null_out_result_on_error = true;
2099 sopts.set_errno_on_error = true;
2100 sopts.min_alignment = 1;
2101 sopts.oom_string =
2102 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2103 sopts.invalid_alignment_string =
2104 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2105
2106 dopts.result = &ret;
2107 dopts.num_items = 1;
2108 dopts.item_size = size;
2109 dopts.alignment = alignment;
2110
2111 imalloc(&sopts, &dopts);
2112
2113 LOG("core.aligned_alloc.exit", "result: %p", ret);
2114
2115 return ret;
2116 }
2117
2118 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2119 void JEMALLOC_NOTHROW *
2120 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2121 je_calloc(size_t num, size_t size) {
2122 void *ret;
2123 static_opts_t sopts;
2124 dynamic_opts_t dopts;
2125
2126 LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2127
2128 static_opts_init(&sopts);
2129 dynamic_opts_init(&dopts);
2130
2131 sopts.may_overflow = true;
2132 sopts.bump_empty_alloc = true;
2133 sopts.null_out_result_on_error = true;
2134 sopts.set_errno_on_error = true;
2135 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2136
2137 dopts.result = &ret;
2138 dopts.num_items = num;
2139 dopts.item_size = size;
2140 dopts.zero = true;
2141
2142 imalloc(&sopts, &dopts);
2143
2144 LOG("core.calloc.exit", "result: %p", ret);
2145
2146 return ret;
2147 }
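
/*
 * Note on sopts.may_overflow above: calloc() must detect num * size
 * overflow rather than silently wrapping. The multiplication and its
 * overflow check live in the shared size-computation step that imalloc()
 * performs (earlier in this file), so a request such as
 * calloc(SIZE_MAX / 2 + 2, 2) fails with ENOMEM instead of succeeding with
 * a truncated size.
 */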
2148
2149 static void *
2150 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2151 prof_tctx_t *tctx) {
2152 void *p;
2153
2154 if (tctx == NULL) {
2155 return NULL;
2156 }
2157 if (usize <= SMALL_MAXCLASS) {
2158 p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2159 if (p == NULL) {
2160 return NULL;
2161 }
2162 arena_prof_promote(tsd_tsdn(tsd), p, usize);
2163 } else {
2164 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2165 }
2166
2167 return p;
2168 }
2169
2170 JEMALLOC_ALWAYS_INLINE void *
2171 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2172 alloc_ctx_t *alloc_ctx) {
2173 void *p;
2174 bool prof_active;
2175 prof_tctx_t *old_tctx, *tctx;
2176
2177 prof_active = prof_active_get_unlocked();
2178 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2179 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2180 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2181 p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2182 } else {
2183 p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2184 }
2185 if (unlikely(p == NULL)) {
2186 prof_alloc_rollback(tsd, tctx, true);
2187 return NULL;
2188 }
2189 prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2190 old_tctx);
2191
2192 return p;
2193 }
2194
2195 JEMALLOC_ALWAYS_INLINE void
2196 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2197 if (!slow_path) {
2198 tsd_assert_fast(tsd);
2199 }
2200 check_entry_exit_locking(tsd_tsdn(tsd));
2201 if (tsd_reentrancy_level_get(tsd) != 0) {
2202 assert(slow_path);
2203 }
2204
2205 assert(ptr != NULL);
2206 assert(malloc_initialized() || IS_INITIALIZER);
2207
2208 alloc_ctx_t alloc_ctx;
2209 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2210 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2211 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2212 assert(alloc_ctx.szind != NSIZES);
2213
2214 size_t usize;
2215 if (config_prof && opt_prof) {
2216 usize = sz_index2size(alloc_ctx.szind);
2217 prof_free(tsd, ptr, usize, &alloc_ctx);
2218 } else if (config_stats) {
2219 usize = sz_index2size(alloc_ctx.szind);
2220 }
2221 if (config_stats) {
2222 *tsd_thread_deallocatedp_get(tsd) += usize;
2223 }
2224
2225 if (likely(!slow_path)) {
2226 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2227 false);
2228 } else {
2229 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2230 true);
2231 }
2232 }
2233
2234 JEMALLOC_ALWAYS_INLINE void
2235 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2236 if (!slow_path) {
2237 tsd_assert_fast(tsd);
2238 }
2239 check_entry_exit_locking(tsd_tsdn(tsd));
2240 if (tsd_reentrancy_level_get(tsd) != 0) {
2241 assert(slow_path);
2242 }
2243
2244 assert(ptr != NULL);
2245 assert(malloc_initialized() || IS_INITIALIZER);
2246
2247 alloc_ctx_t alloc_ctx, *ctx;
2248 if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2249 /*
2250 * When cache_oblivious is disabled and ptr is not page aligned,
2251 * the allocation was not sampled -- usize can be used to
2252 * determine szind directly.
2253 */
2254 alloc_ctx.szind = sz_size2index(usize);
2255 alloc_ctx.slab = true;
2256 ctx = &alloc_ctx;
2257 if (config_debug) {
2258 alloc_ctx_t dbg_ctx;
2259 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2260 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2261 rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2262 &dbg_ctx.slab);
2263 assert(dbg_ctx.szind == alloc_ctx.szind);
2264 assert(dbg_ctx.slab == alloc_ctx.slab);
2265 }
2266 } else if (config_prof && opt_prof) {
2267 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2268 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2269 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2270 assert(alloc_ctx.szind == sz_size2index(usize));
2271 ctx = &alloc_ctx;
2272 } else {
2273 ctx = NULL;
2274 }
2275
2276 if (config_prof && opt_prof) {
2277 prof_free(tsd, ptr, usize, ctx);
2278 }
2279 if (config_stats) {
2280 *tsd_thread_deallocatedp_get(tsd) += usize;
2281 }
2282
2283 if (likely(!slow_path)) {
2284 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2285 } else {
2286 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2287 }
2288 }
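
/*
 * The rtree lookup is skipped above when cache-oblivious layout is compiled
 * out and ptr is not page-aligned, since usize then determines the size
 * class directly; presumably the point is to keep the sized-deallocation
 * fast path free of an extra metadata read, with the config_debug branch
 * re-deriving the same values to keep the shortcut honest.
 */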
2289
2290 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2291 void JEMALLOC_NOTHROW *
2292 JEMALLOC_ALLOC_SIZE(2)
2293 je_realloc(void *ptr, size_t size) {
2294 void *ret;
2295 tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2296 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2297 size_t old_usize = 0;
2298
2299 LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2300
2301 if (unlikely(size == 0)) {
2302 if (ptr != NULL) {
2303 /* realloc(ptr, 0) is equivalent to free(ptr). */
2304 UTRACE(ptr, 0, 0);
2305 tcache_t *tcache;
2306 tsd_t *tsd = tsd_fetch();
2307 if (tsd_reentrancy_level_get(tsd) == 0) {
2308 tcache = tcache_get(tsd);
2309 } else {
2310 tcache = NULL;
2311 }
2312 ifree(tsd, ptr, tcache, true);
2313
2314 LOG("core.realloc.exit", "result: %p", NULL);
2315 return NULL;
2316 }
2317 size = 1;
2318 }
2319
2320 if (likely(ptr != NULL)) {
2321 assert(malloc_initialized() || IS_INITIALIZER);
2322 tsd_t *tsd = tsd_fetch();
2323
2324 check_entry_exit_locking(tsd_tsdn(tsd));
2325
2326 alloc_ctx_t alloc_ctx;
2327 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2328 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2329 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2330 assert(alloc_ctx.szind != NSIZES);
2331 old_usize = sz_index2size(alloc_ctx.szind);
2332 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2333 if (config_prof && opt_prof) {
2334 usize = sz_s2u(size);
2335 ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2336 NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2337 &alloc_ctx);
2338 } else {
2339 if (config_stats) {
2340 usize = sz_s2u(size);
2341 }
2342 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2343 }
2344 tsdn = tsd_tsdn(tsd);
2345 } else {
2346 /* realloc(NULL, size) is equivalent to malloc(size). */
2347 void *ret = je_malloc(size);
2348 LOG("core.realloc.exit", "result: %p", ret);
2349 return ret;
2350 }
2351
2352 if (unlikely(ret == NULL)) {
2353 if (config_xmalloc && unlikely(opt_xmalloc)) {
2354 malloc_write("<jemalloc>: Error in realloc(): "
2355 "out of memory\n");
2356 abort();
2357 }
2358 set_errno(ENOMEM);
2359 }
2360 if (config_stats && likely(ret != NULL)) {
2361 tsd_t *tsd;
2362
2363 assert(usize == isalloc(tsdn, ret));
2364 tsd = tsdn_tsd(tsdn);
2365 *tsd_thread_allocatedp_get(tsd) += usize;
2366 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2367 }
2368 UTRACE(ptr, size, ret);
2369 check_entry_exit_locking(tsdn);
2370
2371 LOG("core.realloc.exit", "result: %p", ret);
2372 return ret;
2373 }
2374
2375 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2376 je_free(void *ptr) {
2377 LOG("core.free.entry", "ptr: %p", ptr);
2378
2379 UTRACE(ptr, 0, 0);
2380 if (likely(ptr != NULL)) {
2381 /*
2382 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2383 * based on only free() calls -- other activities trigger the
2384 * minimal to full transition. This is because free() may
2385 * happen during thread shutdown after tls deallocation: if a
2386 * thread never had any malloc activities until then, a
2387 * fully-setup tsd won't be destructed properly.
2388 */
2389 tsd_t *tsd = tsd_fetch_min();
2390 check_entry_exit_locking(tsd_tsdn(tsd));
2391
2392 tcache_t *tcache;
2393 if (likely(tsd_fast(tsd))) {
2394 tsd_assert_fast(tsd);
2395 /* Unconditionally get tcache ptr on fast path. */
2396 tcache = tsd_tcachep_get(tsd);
2397 ifree(tsd, ptr, tcache, false);
2398 } else {
2399 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2400 tcache = tcache_get(tsd);
2401 } else {
2402 tcache = NULL;
2403 }
2404 ifree(tsd, ptr, tcache, true);
2405 }
2406 check_entry_exit_locking(tsd_tsdn(tsd));
2407 }
2408 LOG("core.free.exit", "");
2409 }
2410
2411 /*
2412 * End malloc(3)-compatible functions.
2413 */
2414 /******************************************************************************/
2415 /*
2416 * Begin non-standard override functions.
2417 */
2418
2419 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2420 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2421 void JEMALLOC_NOTHROW *
2422 JEMALLOC_ATTR(malloc)
2423 je_memalign(size_t alignment, size_t size) {
2424 void *ret;
2425 static_opts_t sopts;
2426 dynamic_opts_t dopts;
2427
2428 LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2429 size);
2430
2431 static_opts_init(&sopts);
2432 dynamic_opts_init(&dopts);
2433
2434 sopts.bump_empty_alloc = true;
2435 sopts.min_alignment = 1;
2436 sopts.oom_string =
2437 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2438 sopts.invalid_alignment_string =
2439 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2440 sopts.null_out_result_on_error = true;
2441
2442 dopts.result = &ret;
2443 dopts.num_items = 1;
2444 dopts.item_size = size;
2445 dopts.alignment = alignment;
2446
2447 imalloc(&sopts, &dopts);
2448
2449 LOG("core.memalign.exit", "result: %p", ret);
2450 return ret;
2451 }
2452 #endif
2453
2454 #ifdef JEMALLOC_OVERRIDE_VALLOC
2455 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2456 void JEMALLOC_NOTHROW *
2457 JEMALLOC_ATTR(malloc)
2458 je_valloc(size_t size) {
2459 void *ret;
2460
2461 static_opts_t sopts;
2462 dynamic_opts_t dopts;
2463
2464 LOG("core.valloc.entry", "size: %zu\n", size);
2465
2466 static_opts_init(&sopts);
2467 dynamic_opts_init(&dopts);
2468
2469 sopts.bump_empty_alloc = true;
2470 sopts.null_out_result_on_error = true;
2471 sopts.min_alignment = PAGE;
2472 sopts.oom_string =
2473 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2474 sopts.invalid_alignment_string =
2475 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2476
2477 dopts.result = &ret;
2478 dopts.num_items = 1;
2479 dopts.item_size = size;
2480 dopts.alignment = PAGE;
2481
2482 imalloc(&sopts, &dopts);
2483
2484 LOG("core.valloc.exit", "result: %p\n", ret);
2485 return ret;
2486 }
2487 #endif
2488
2489 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2490 /*
2491 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2492 * to inconsistently reference libc's malloc(3)-compatible functions
2493 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2494 *
2495 * These definitions interpose hooks in glibc. The functions are actually
2496 * passed an extra argument for the caller return address, which will be
2497 * ignored.
2498 */
2499 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2500 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2501 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2502 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2503 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2504 je_memalign;
2505 # endif
2506
2507 # ifdef CPU_COUNT
2508 /*
2509  * To enable static linking with glibc, the libc-specific malloc interface must
2510  * also be implemented, so that none of glibc's malloc.o functions are pulled
2511  * into the link.
2512 */
2513 # define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
2514 /* To force macro expansion of je_ prefix before stringification. */
2515 # define PREALIAS(je_fn) ALIAS(je_fn)
2516 # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2517 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2518 # endif
2519 # ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2520 void __libc_free(void* ptr) PREALIAS(je_free);
2521 # endif
2522 # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2523 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2524 # endif
2525 # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2526 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2527 # endif
2528 # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2529 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2530 # endif
2531 # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2532 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2533 # endif
2534 # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2535 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2536 # endif
2537 # undef PREALIAS
2538 # undef ALIAS
2539 # endif
2540 #endif
2541
2542 /*
2543 * End non-standard override functions.
2544 */
2545 /******************************************************************************/
2546 /*
2547 * Begin non-standard functions.
2548 */
2549
2550 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2551 void JEMALLOC_NOTHROW *
2552 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2553 je_mallocx(size_t size, int flags) {
2554 void *ret;
2555 static_opts_t sopts;
2556 dynamic_opts_t dopts;
2557
2558 LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2559
2560 static_opts_init(&sopts);
2561 dynamic_opts_init(&dopts);
2562
2563 sopts.assert_nonempty_alloc = true;
2564 sopts.null_out_result_on_error = true;
2565 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2566
2567 dopts.result = &ret;
2568 dopts.num_items = 1;
2569 dopts.item_size = size;
2570 if (unlikely(flags != 0)) {
2571 if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2572 dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2573 }
2574
2575 dopts.zero = MALLOCX_ZERO_GET(flags);
2576
2577 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2578 if ((flags & MALLOCX_TCACHE_MASK)
2579 == MALLOCX_TCACHE_NONE) {
2580 dopts.tcache_ind = TCACHE_IND_NONE;
2581 } else {
2582 dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2583 }
2584 } else {
2585 dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2586 }
2587
2588 if ((flags & MALLOCX_ARENA_MASK) != 0)
2589 dopts.arena_ind = MALLOCX_ARENA_GET(flags);
2590 }
2591
2592 imalloc(&sopts, &dopts);
2593
2594 LOG("core.mallocx.exit", "result: %p", ret);
2595 return ret;
2596 }
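
/*
 * Caller-side sketch of the flag decoding above (application code using the
 * un-prefixed public names, not part of this translation unit):
 *
 *	// 64-byte aligned, zero-filled, bypassing the thread cache.
 *	void *p = mallocx(256, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
 *	    MALLOCX_TCACHE_NONE);
 *	if (p != NULL) {
 *		dallocx(p, MALLOCX_TCACHE_NONE);
 *	}
 *
 * When flags == 0, the unlikely() branch above is skipped entirely and the
 * defaults from dynamic_opts_init() remain in effect.
 */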
2597
2598 static void *
2599 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2600 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2601 prof_tctx_t *tctx) {
2602 void *p;
2603
2604 if (tctx == NULL) {
2605 return NULL;
2606 }
2607 if (usize <= SMALL_MAXCLASS) {
2608 p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2609 alignment, zero, tcache, arena);
2610 if (p == NULL) {
2611 return NULL;
2612 }
2613 arena_prof_promote(tsdn, p, usize);
2614 } else {
2615 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2616 tcache, arena);
2617 }
2618
2619 return p;
2620 }
2621
2622 JEMALLOC_ALWAYS_INLINE void *
2623 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2624 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2625 arena_t *arena, alloc_ctx_t *alloc_ctx) {
2626 void *p;
2627 bool prof_active;
2628 prof_tctx_t *old_tctx, *tctx;
2629
2630 prof_active = prof_active_get_unlocked();
2631 old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2632 tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2633 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2634 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2635 *usize, alignment, zero, tcache, arena, tctx);
2636 } else {
2637 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2638 zero, tcache, arena);
2639 }
2640 if (unlikely(p == NULL)) {
2641 prof_alloc_rollback(tsd, tctx, false);
2642 return NULL;
2643 }
2644
2645 if (p == old_ptr && alignment != 0) {
2646 /*
2647 * The allocation did not move, so it is possible that the size
2648 * class is smaller than would guarantee the requested
2649 * alignment, and that the alignment constraint was
2650 * serendipitously satisfied. Additionally, old_usize may not
2651 * be the same as the current usize because of in-place large
2652 * reallocation. Therefore, query the actual value of usize.
2653 */
2654 *usize = isalloc(tsd_tsdn(tsd), p);
2655 }
2656 prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2657 old_usize, old_tctx);
2658
2659 return p;
2660 }
2661
2662 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2663 void JEMALLOC_NOTHROW *
2664 JEMALLOC_ALLOC_SIZE(2)
2665 je_rallocx(void *ptr, size_t size, int flags) {
2666 void *p;
2667 tsd_t *tsd;
2668 size_t usize;
2669 size_t old_usize;
2670 size_t alignment = MALLOCX_ALIGN_GET(flags);
2671 bool zero = flags & MALLOCX_ZERO;
2672 arena_t *arena;
2673 tcache_t *tcache;
2674
2675 LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2676 size, flags);
2677
2678
2679 assert(ptr != NULL);
2680 assert(size != 0);
2681 assert(malloc_initialized() || IS_INITIALIZER);
2682 tsd = tsd_fetch();
2683 check_entry_exit_locking(tsd_tsdn(tsd));
2684
2685 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2686 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2687 arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2688 if (unlikely(arena == NULL)) {
2689 goto label_oom;
2690 }
2691 } else {
2692 arena = NULL;
2693 }
2694
2695 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2696 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2697 tcache = NULL;
2698 } else {
2699 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2700 }
2701 } else {
2702 tcache = tcache_get(tsd);
2703 }
2704
2705 alloc_ctx_t alloc_ctx;
2706 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2707 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2708 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2709 assert(alloc_ctx.szind != NSIZES);
2710 old_usize = sz_index2size(alloc_ctx.szind);
2711 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2712 if (config_prof && opt_prof) {
2713 usize = (alignment == 0) ?
2714 sz_s2u(size) : sz_sa2u(size, alignment);
2715 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2716 goto label_oom;
2717 }
2718 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2719 zero, tcache, arena, &alloc_ctx);
2720 if (unlikely(p == NULL)) {
2721 goto label_oom;
2722 }
2723 } else {
2724 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2725 zero, tcache, arena);
2726 if (unlikely(p == NULL)) {
2727 goto label_oom;
2728 }
2729 if (config_stats) {
2730 usize = isalloc(tsd_tsdn(tsd), p);
2731 }
2732 }
2733 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2734
2735 if (config_stats) {
2736 *tsd_thread_allocatedp_get(tsd) += usize;
2737 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2738 }
2739 UTRACE(ptr, size, p);
2740 check_entry_exit_locking(tsd_tsdn(tsd));
2741
2742 LOG("core.rallocx.exit", "result: %p", p);
2743 return p;
2744 label_oom:
2745 if (config_xmalloc && unlikely(opt_xmalloc)) {
2746 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2747 abort();
2748 }
2749 UTRACE(ptr, size, 0);
2750 check_entry_exit_locking(tsd_tsdn(tsd));
2751
2752 LOG("core.rallocx.exit", "result: %p", NULL);
2753 return NULL;
2754 }
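
/*
 * Application-side sketch of the contract implemented above (public names):
 * rallocx() may move the object, and alignment is a property of the request
 * rather than of the pointer, so the same MALLOCX_ALIGN() flag has to be
 * repeated on every resize.
 *
 *	void *p = mallocx(100, MALLOCX_ALIGN(64));
 *	void *q = rallocx(p, 200, MALLOCX_ALIGN(64));
 *	if (q != NULL) {
 *		p = q;
 *	}
 *	// On failure (q == NULL), p remains valid and unchanged.
 */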
2755
2756 JEMALLOC_ALWAYS_INLINE size_t
2757 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2758 size_t extra, size_t alignment, bool zero) {
2759 size_t usize;
2760
2761 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2762 return old_usize;
2763 }
2764 usize = isalloc(tsdn, ptr);
2765
2766 return usize;
2767 }
2768
2769 static size_t
2770 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2771 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2772 size_t usize;
2773
2774 if (tctx == NULL) {
2775 return old_usize;
2776 }
2777 usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2778 zero);
2779
2780 return usize;
2781 }
2782
2783 JEMALLOC_ALWAYS_INLINE size_t
2784 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2785 size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2786 size_t usize_max, usize;
2787 bool prof_active;
2788 prof_tctx_t *old_tctx, *tctx;
2789
2790 prof_active = prof_active_get_unlocked();
2791 old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2792 /*
2793 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2794 * Therefore, compute its maximum possible value and use that in
2795 * prof_alloc_prep() to decide whether to capture a backtrace.
2796 * prof_realloc() will use the actual usize to decide whether to sample.
2797 */
2798 if (alignment == 0) {
2799 usize_max = sz_s2u(size+extra);
2800 assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2801 } else {
2802 usize_max = sz_sa2u(size+extra, alignment);
2803 if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2804 /*
2805 * usize_max is out of range, and chances are that
2806 * allocation will fail, but use the maximum possible
2807 * value and carry on with prof_alloc_prep(), just in
2808 * case allocation succeeds.
2809 */
2810 usize_max = LARGE_MAXCLASS;
2811 }
2812 }
2813 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2814
2815 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2816 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2817 size, extra, alignment, zero, tctx);
2818 } else {
2819 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2820 extra, alignment, zero);
2821 }
2822 if (usize == old_usize) {
2823 prof_alloc_rollback(tsd, tctx, false);
2824 return usize;
2825 }
2826 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2827 old_tctx);
2828
2829 return usize;
2830 }
2831
2832 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2833 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2834 tsd_t *tsd;
2835 size_t usize, old_usize;
2836 size_t alignment = MALLOCX_ALIGN_GET(flags);
2837 bool zero = flags & MALLOCX_ZERO;
2838
2839 LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2840 "flags: %d", ptr, size, extra, flags);
2841
2842 assert(ptr != NULL);
2843 assert(size != 0);
2844 assert(SIZE_T_MAX - size >= extra);
2845 assert(malloc_initialized() || IS_INITIALIZER);
2846 tsd = tsd_fetch();
2847 check_entry_exit_locking(tsd_tsdn(tsd));
2848
2849 alloc_ctx_t alloc_ctx;
2850 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2851 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2852 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2853 assert(alloc_ctx.szind != NSIZES);
2854 old_usize = sz_index2size(alloc_ctx.szind);
2855 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2856 /*
2857 * The API explicitly absolves itself of protecting against (size +
2858 * extra) numerical overflow, but we may need to clamp extra to avoid
2859 * exceeding LARGE_MAXCLASS.
2860 *
2861 * Ordinarily, size limit checking is handled deeper down, but here we
2862 * have to check as part of (size + extra) clamping, since we need the
2863 * clamped value in the above helper functions.
2864 */
2865 if (unlikely(size > LARGE_MAXCLASS)) {
2866 usize = old_usize;
2867 goto label_not_resized;
2868 }
2869 if (unlikely(LARGE_MAXCLASS - size < extra)) {
2870 extra = LARGE_MAXCLASS - size;
2871 }
2872
2873 if (config_prof && opt_prof) {
2874 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2875 alignment, zero, &alloc_ctx);
2876 } else {
2877 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2878 extra, alignment, zero);
2879 }
2880 if (unlikely(usize == old_usize)) {
2881 goto label_not_resized;
2882 }
2883
2884 if (config_stats) {
2885 *tsd_thread_allocatedp_get(tsd) += usize;
2886 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2887 }
2888 label_not_resized:
2889 UTRACE(ptr, size, ptr);
2890 check_entry_exit_locking(tsd_tsdn(tsd));
2891
2892 LOG("core.xallocx.exit", "result: %zu", usize);
2893 return usize;
2894 }
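
/*
 * Unlike rallocx(), xallocx() never moves the allocation; it returns the
 * resulting usable size, which equals the old usable size when the resize
 * could not be performed in place. Typical caller-side pattern (public
 * names, application code):
 *
 *	size_t n = xallocx(p, new_size, 0, 0);
 *	if (n < new_size) {
 *		// In-place growth failed; fall back to a moving resize.
 *		void *q = rallocx(p, new_size, 0);
 *		if (q != NULL) {
 *			p = q;
 *		}
 *	}
 */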
2895
2896 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2897 JEMALLOC_ATTR(pure)
2898 je_sallocx(const void *ptr, UNUSED int flags) {
2899 size_t usize;
2900 tsdn_t *tsdn;
2901
2902 LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2903
2904 assert(malloc_initialized() || IS_INITIALIZER);
2905 assert(ptr != NULL);
2906
2907 tsdn = tsdn_fetch();
2908 check_entry_exit_locking(tsdn);
2909
2910 if (config_debug || force_ivsalloc) {
2911 usize = ivsalloc(tsdn, ptr);
2912 assert(force_ivsalloc || usize != 0);
2913 } else {
2914 usize = isalloc(tsdn, ptr);
2915 }
2916
2917 check_entry_exit_locking(tsdn);
2918
2919 LOG("core.sallocx.exit", "result: %zu", usize);
2920 return usize;
2921 }
2922
2923 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2924 je_dallocx(void *ptr, int flags) {
2925 LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2926
2927 assert(ptr != NULL);
2928 assert(malloc_initialized() || IS_INITIALIZER);
2929
2930 tsd_t *tsd = tsd_fetch();
2931 bool fast = tsd_fast(tsd);
2932 check_entry_exit_locking(tsd_tsdn(tsd));
2933
2934 tcache_t *tcache;
2935 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2936 /* Not allowed to be reentrant and specify a custom tcache. */
2937 assert(tsd_reentrancy_level_get(tsd) == 0);
2938 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2939 tcache = NULL;
2940 } else {
2941 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2942 }
2943 } else {
2944 if (likely(fast)) {
2945 tcache = tsd_tcachep_get(tsd);
2946 assert(tcache == tcache_get(tsd));
2947 } else {
2948 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2949 tcache = tcache_get(tsd);
2950 } else {
2951 tcache = NULL;
2952 }
2953 }
2954 }
2955
2956 UTRACE(ptr, 0, 0);
2957 if (likely(fast)) {
2958 tsd_assert_fast(tsd);
2959 ifree(tsd, ptr, tcache, false);
2960 } else {
2961 ifree(tsd, ptr, tcache, true);
2962 }
2963 check_entry_exit_locking(tsd_tsdn(tsd));
2964
2965 LOG("core.dallocx.exit", "");
2966 }
2967
2968 JEMALLOC_ALWAYS_INLINE size_t
2969 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2970 check_entry_exit_locking(tsdn);
2971
2972 size_t usize;
2973 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2974 usize = sz_s2u(size);
2975 } else {
2976 usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2977 }
2978 check_entry_exit_locking(tsdn);
2979 return usize;
2980 }
2981
2982 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2983 je_sdallocx(void *ptr, size_t size, int flags) {
2984 assert(ptr != NULL);
2985 assert(malloc_initialized() || IS_INITIALIZER);
2986
2987 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2988 size, flags);
2989
2990 tsd_t *tsd = tsd_fetch();
2991 bool fast = tsd_fast(tsd);
2992 size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
2993 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
2994 check_entry_exit_locking(tsd_tsdn(tsd));
2995
2996 tcache_t *tcache;
2997 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2998 /* Not allowed to be reentrant and specify a custom tcache. */
2999 assert(tsd_reentrancy_level_get(tsd) == 0);
3000 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3001 tcache = NULL;
3002 } else {
3003 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
3004 }
3005 } else {
3006 if (likely(fast)) {
3007 tcache = tsd_tcachep_get(tsd);
3008 assert(tcache == tcache_get(tsd));
3009 } else {
3010 if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
3011 tcache = tcache_get(tsd);
3012 } else {
3013 tcache = NULL;
3014 }
3015 }
3016 }
3017
3018 UTRACE(ptr, 0, 0);
3019 if (likely(fast)) {
3020 tsd_assert_fast(tsd);
3021 isfree(tsd, ptr, usize, tcache, false);
3022 } else {
3023 isfree(tsd, ptr, usize, tcache, true);
3024 }
3025 check_entry_exit_locking(tsd_tsdn(tsd));
3026
3027 LOG("core.sdallocx.exit", "");
3028 }
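
/*
 * The assertion above pins down the sized-deallocation contract: the size
 * passed to sdallocx() must map to the same usable size as the allocation
 * itself, so callers normally pass either the original request or a value
 * obtained from nallocx(). Application-side sketch (public names):
 *
 *	void *p = mallocx(len, 0);
 *	...
 *	sdallocx(p, len, 0);
 */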
3029
3030 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3031 JEMALLOC_ATTR(pure)
3032 je_nallocx(size_t size, int flags) {
3033 size_t usize;
3034 tsdn_t *tsdn;
3035
3036 assert(size != 0);
3037
3038 if (unlikely(malloc_init())) {
3039 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3040 return 0;
3041 }
3042
3043 tsdn = tsdn_fetch();
3044 check_entry_exit_locking(tsdn);
3045
3046 usize = inallocx(tsdn, size, flags);
3047 if (unlikely(usize > LARGE_MAXCLASS)) {
3048 LOG("core.nallocx.exit", "result: %zu", ZU(0));
3049 return 0;
3050 }
3051
3052 check_entry_exit_locking(tsdn);
3053 LOG("core.nallocx.exit", "result: %zu", usize);
3054 return usize;
3055 }
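
/*
 * nallocx() is the non-allocating counterpart of mallocx(): it reports the
 * usable size that the same (size, flags) pair would produce, without
 * touching the heap. Illustrative caller-side use (public names):
 *
 *	size_t usable = nallocx(len, MALLOCX_ALIGN(16));
 *	void *p = mallocx(len, MALLOCX_ALIGN(16));
 *	// On success, malloc_usable_size(p) == usable.
 */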
3056
3057 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3058 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3059 size_t newlen) {
3060 int ret;
3061 tsd_t *tsd;
3062
3063 LOG("core.mallctl.entry", "name: %s", name);
3064
3065 if (unlikely(malloc_init())) {
3066 LOG("core.mallctl.exit", "result: %d", EAGAIN);
3067 return EAGAIN;
3068 }
3069
3070 tsd = tsd_fetch();
3071 check_entry_exit_locking(tsd_tsdn(tsd));
3072 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3073 check_entry_exit_locking(tsd_tsdn(tsd));
3074
3075 LOG("core.mallctl.exit", "result: %d", ret);
3076 return ret;
3077 }
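
/*
 * Illustrative mallctl() call from application code (un-prefixed public
 * name); reading a value is an oldp/oldlenp query with no new value:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
 *		// allocated now holds the bytes allocated by the application.
 *	}
 */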
3078
3079 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3080 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3081 int ret;
3082
3083 LOG("core.mallctlnametomib.entry", "name: %s", name);
3084
3085 if (unlikely(malloc_init())) {
3086 LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3087 return EAGAIN;
3088 }
3089
3090 tsd_t *tsd = tsd_fetch();
3091 check_entry_exit_locking(tsd_tsdn(tsd));
3092 ret = ctl_nametomib(tsd, name, mibp, miblenp);
3093 check_entry_exit_locking(tsd_tsdn(tsd));
3094
3095 LOG("core.mallctlnametomib.exit", "result: %d", ret);
3096 return ret;
3097 }
3098
3099 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3100 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3101 void *newp, size_t newlen) {
3102 int ret;
3103 tsd_t *tsd;
3104
3105 LOG("core.mallctlbymib.entry", "");
3106
3107 if (unlikely(malloc_init())) {
3108 LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3109 return EAGAIN;
3110 }
3111
3112 tsd = tsd_fetch();
3113 check_entry_exit_locking(tsd_tsdn(tsd));
3114 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3115 check_entry_exit_locking(tsd_tsdn(tsd));
3116 LOG("core.mallctlbymib.exit", "result: %d", ret);
3117 return ret;
3118 }
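
/*
 * The nametomib()/bymib() pair lets callers translate a name once and then
 * issue repeated queries without re-parsing it. Sketch of that pattern
 * (application code, public names; narenas as reported by the
 * "arenas.narenas" mallctl), reading one counter per arena:
 *
 *	size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
 *	mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen);
 *	for (unsigned i = 0; i < narenas; i++) {
 *		size_t v, sz = sizeof(v);
 *		mib[2] = (size_t)i;	// Patch the arena index component.
 *		mallctlbymib(mib, miblen, &v, &sz, NULL, 0);
 *	}
 */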
3119
3120 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3121 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3122 const char *opts) {
3123 tsdn_t *tsdn;
3124
3125 LOG("core.malloc_stats_print.entry", "");
3126
3127 tsdn = tsdn_fetch();
3128 check_entry_exit_locking(tsdn);
3129 stats_print(write_cb, cbopaque, opts);
3130 check_entry_exit_locking(tsdn);
3131 LOG("core.malloc_stats_print.exit", "");
3132 }
3133
3134 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3135 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3136 size_t ret;
3137 tsdn_t *tsdn;
3138
3139 LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3140
3141 assert(malloc_initialized() || IS_INITIALIZER);
3142
3143 tsdn = tsdn_fetch();
3144 check_entry_exit_locking(tsdn);
3145
3146 if (unlikely(ptr == NULL)) {
3147 ret = 0;
3148 } else {
3149 if (config_debug || force_ivsalloc) {
3150 ret = ivsalloc(tsdn, ptr);
3151 assert(force_ivsalloc || ret != 0);
3152 } else {
3153 ret = isalloc(tsdn, ptr);
3154 }
3155 }
3156
3157 check_entry_exit_locking(tsdn);
3158 LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3159 return ret;
3160 }
3161
3162 /*
3163 * End non-standard functions.
3164 */
3165 /******************************************************************************/
3166 /*
3167 * Begin compatibility functions.
3168 */
3169
3170 #define ALLOCM_LG_ALIGN(la) (la)
3171 #define ALLOCM_ALIGN(a) (ffsl(a)-1)
3172 #define ALLOCM_ZERO ((int)0x40)
3173 #define ALLOCM_NO_MOVE ((int)0x80)
3174
3175 #define ALLOCM_SUCCESS 0
3176 #define ALLOCM_ERR_OOM 1
3177 #define ALLOCM_ERR_NOT_MOVED 2
3178
3179 int
3180 je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3181 assert(ptr != NULL);
3182
3183 void *p = je_mallocx(size, flags);
3184 if (p == NULL) {
3185 return (ALLOCM_ERR_OOM);
3186 }
3187 if (rsize != NULL) {
3188 *rsize = isalloc(tsdn_fetch(), p);
3189 }
3190 *ptr = p;
3191 return ALLOCM_SUCCESS;
3192 }
3193
3194 int
3195 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3196 assert(ptr != NULL);
3197 assert(*ptr != NULL);
3198 assert(size != 0);
3199 assert(SIZE_T_MAX - size >= extra);
3200
3201 int ret;
3202 bool no_move = flags & ALLOCM_NO_MOVE;
3203
3204 if (no_move) {
3205 size_t usize = je_xallocx(*ptr, size, extra, flags);
3206 ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
3207 if (rsize != NULL) {
3208 *rsize = usize;
3209 }
3210 } else {
3211 void *p = je_rallocx(*ptr, size+extra, flags);
3212 if (p != NULL) {
3213 *ptr = p;
3214 ret = ALLOCM_SUCCESS;
3215 } else {
3216 ret = ALLOCM_ERR_OOM;
3217 }
3218 if (rsize != NULL) {
3219 *rsize = isalloc(tsdn_fetch(), *ptr);
3220 }
3221 }
3222 return ret;
3223 }
3224
3225 int
3226 je_sallocm(const void *ptr, size_t *rsize, int flags) {
3227 assert(rsize != NULL);
3228 *rsize = je_sallocx(ptr, flags);
3229 return ALLOCM_SUCCESS;
3230 }
3231
3232 int
3233 je_dallocm(void *ptr, int flags) {
3234 je_dallocx(ptr, flags);
3235 return ALLOCM_SUCCESS;
3236 }
3237
3238 int
3239 je_nallocm(size_t *rsize, size_t size, int flags) {
3240 size_t usize = je_nallocx(size, flags);
3241 if (usize == 0) {
3242 return ALLOCM_ERR_OOM;
3243 }
3244 if (rsize != NULL) {
3245 *rsize = usize;
3246 }
3247 return ALLOCM_SUCCESS;
3248 }
3249
3250 #undef ALLOCM_LG_ALIGN
3251 #undef ALLOCM_ALIGN
3252 #undef ALLOCM_ZERO
3253 #undef ALLOCM_NO_MOVE
3254
3255 #undef ALLOCM_SUCCESS
3256 #undef ALLOCM_ERR_OOM
3257 #undef ALLOCM_ERR_NOT_MOVED
3258
3259 /*
3260 * End compatibility functions.
3261 */
3262 /******************************************************************************/
3263 /*
3264 * The following functions are used by threading libraries for protection of
3265 * malloc during fork().
3266 */
3267
3268 /*
3269 * If an application creates a thread before doing any allocation in the main
3270 * thread, then calls fork(2) in the main thread followed by memory allocation
3271 * in the child process, a race can occur that results in deadlock within the
3272 * child: the main thread may have forked while the created thread had
3273 * partially initialized the allocator. Ordinarily jemalloc prevents
3274 * fork/malloc races via the following functions it registers during
3275 * initialization using pthread_atfork(), but of course that does no good if
3276 * the allocator isn't fully initialized at fork time. The following library
3277 * constructor is a partial solution to this problem. It may still be possible
3278 * to trigger the deadlock described above, but doing so would involve forking
3279 * via a library constructor that runs before jemalloc's runs.
3280 */
3281 #ifndef JEMALLOC_JET
3282 JEMALLOC_ATTR(constructor)
3283 static void
3284 jemalloc_constructor(void) {
3285 malloc_init();
3286 }
3287 #endif
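
/*
 * Concretely, the registration the comment above refers to amounts to the
 * following, performed during initialization on pthreads platforms (the
 * callback names differ when JEMALLOC_MUTEX_INIT_CB is in use):
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */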
3288
3289 #ifndef JEMALLOC_MUTEX_INIT_CB
3290 void
3291 jemalloc_prefork(void)
3292 #else
3293 JEMALLOC_EXPORT void
3294 _malloc_prefork(void)
3295 #endif
3296 {
3297 tsd_t *tsd;
3298 unsigned i, j, narenas;
3299 arena_t *arena;
3300
3301 #ifdef JEMALLOC_MUTEX_INIT_CB
3302 if (!malloc_initialized()) {
3303 return;
3304 }
3305 #endif
3306 assert(malloc_initialized());
3307
3308 tsd = tsd_fetch();
3309
3310 narenas = narenas_total_get();
3311
3312 witness_prefork(tsd_witness_tsdp_get(tsd));
3313 /* Acquire all mutexes in a safe order. */
3314 ctl_prefork(tsd_tsdn(tsd));
3315 tcache_prefork(tsd_tsdn(tsd));
3316 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3317 if (have_background_thread) {
3318 background_thread_prefork0(tsd_tsdn(tsd));
3319 }
3320 prof_prefork0(tsd_tsdn(tsd));
3321 if (have_background_thread) {
3322 background_thread_prefork1(tsd_tsdn(tsd));
3323 }
3324 /* Break arena prefork into stages to preserve lock order. */
3325 for (i = 0; i < 8; i++) {
3326 for (j = 0; j < narenas; j++) {
3327 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3328 NULL) {
3329 switch (i) {
3330 case 0:
3331 arena_prefork0(tsd_tsdn(tsd), arena);
3332 break;
3333 case 1:
3334 arena_prefork1(tsd_tsdn(tsd), arena);
3335 break;
3336 case 2:
3337 arena_prefork2(tsd_tsdn(tsd), arena);
3338 break;
3339 case 3:
3340 arena_prefork3(tsd_tsdn(tsd), arena);
3341 break;
3342 case 4:
3343 arena_prefork4(tsd_tsdn(tsd), arena);
3344 break;
3345 case 5:
3346 arena_prefork5(tsd_tsdn(tsd), arena);
3347 break;
3348 case 6:
3349 arena_prefork6(tsd_tsdn(tsd), arena);
3350 break;
3351 case 7:
3352 arena_prefork7(tsd_tsdn(tsd), arena);
3353 break;
3354 default: not_reached();
3355 }
3356 }
3357 }
3358 }
3359 prof_prefork1(tsd_tsdn(tsd));
3360 }
3361
3362 #ifndef JEMALLOC_MUTEX_INIT_CB
3363 void
3364 jemalloc_postfork_parent(void)
3365 #else
3366 JEMALLOC_EXPORT void
3367 _malloc_postfork(void)
3368 #endif
3369 {
3370 tsd_t *tsd;
3371 unsigned i, narenas;
3372
3373 #ifdef JEMALLOC_MUTEX_INIT_CB
3374 if (!malloc_initialized()) {
3375 return;
3376 }
3377 #endif
3378 assert(malloc_initialized());
3379
3380 tsd = tsd_fetch();
3381
3382 witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3383 /* Release all mutexes, now that fork() has completed. */
3384 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3385 arena_t *arena;
3386
3387 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3388 arena_postfork_parent(tsd_tsdn(tsd), arena);
3389 }
3390 }
3391 prof_postfork_parent(tsd_tsdn(tsd));
3392 if (have_background_thread) {
3393 background_thread_postfork_parent(tsd_tsdn(tsd));
3394 }
3395 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3396 tcache_postfork_parent(tsd_tsdn(tsd));
3397 ctl_postfork_parent(tsd_tsdn(tsd));
3398 }
3399
3400 void
3401 jemalloc_postfork_child(void) {
3402 tsd_t *tsd;
3403 unsigned i, narenas;
3404
3405 assert(malloc_initialized());
3406
3407 tsd = tsd_fetch();
3408
3409 witness_postfork_child(tsd_witness_tsdp_get(tsd));
3410 /* Release all mutexes, now that fork() has completed. */
3411 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3412 arena_t *arena;
3413
3414 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3415 arena_postfork_child(tsd_tsdn(tsd), arena);
3416 }
3417 }
3418 prof_postfork_child(tsd_tsdn(tsd));
3419 if (have_background_thread) {
3420 background_thread_postfork_child(tsd_tsdn(tsd));
3421 }
3422 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3423 tcache_postfork_child(tsd_tsdn(tsd));
3424 ctl_postfork_child(tsd_tsdn(tsd));
3425 }
3426
3427 void
3428 _malloc_first_thread(void)
3429 {
3430
3431 (void)malloc_mutex_first_thread();
3432 }
3433
3434 /******************************************************************************/
3435