#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t	curchunks;
static size_t	highchunks;

rtree_t		chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

static void	*chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool	chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool	chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool	chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t	chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define	ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t	**n;					\
		void		**v;					\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}
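
/*
 * Illustrative sketch (not part of the allocator): application code normally
 * installs a replacement hook table like the one copied above through the
 * "arena.<i>.chunk_hooks" mallctl rather than by calling chunk_hooks_set()
 * directly.  The my_* functions named below are hypothetical
 * application-provided hooks with the corresponding chunk_*_t signatures:
 *
 *	chunk_hooks_t hooks = {
 *		my_alloc, my_dalloc, my_commit, my_decommit,
 *		my_purge, my_split, my_merge
 *	};
 *	mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
 */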

static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}
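
/*
 * Illustrative note: the loop in chunk_register() implements a lock-free
 * high-water mark.  highchunks only ever ratchets upward; a thread that loses
 * the CAS race re-reads the published maximum and retries only while its own
 * observation still exceeds it.  A minimal sketch of the same pattern
 * (high_water_update is a hypothetical helper; consistent with the usage
 * above, atomic_cas_z() returns true on failure):
 *
 *	static void
 *	high_water_update(size_t *highp, size_t cur)
 *	{
 *		size_t high = atomic_read_z(highp);
 *
 *		while (cur > high && atomic_cas_z(highp, high, cur))
 *			high = atomic_read_z(highp);
 *	}
 */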

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}

static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}
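
/*
 * Illustrative diagram (not part of the allocator): chunk_recycle() carves the
 * selected extent into up to three pieces.  Only the middle piece is returned;
 * the lead (alignment padding) and trail (excess size) are re-inserted into
 * the trees as smaller extents:
 *
 *	|<----------- extent_node_size_get(node) ----------->|
 *	+----------+-------------------------------+---------+
 *	| leadsize |             size              | trailsz |
 *	+----------+-------------------------------+---------+
 *	^          ^
 *	node addr  ret (aligned to `alignment`)
 */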

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* Retained. */
	if ((ret = chunk_recycle(arena, &chunk_hooks,
	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
	    new_addr, size, alignment, zero, commit, true)) != NULL)
		return (ret);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/*
	 * mmap.  Requesting an address is not implemented for
	 * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
	 */
	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
	    commit)) != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}
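
/*
 * Illustrative note: the fallback order above is governed by the arena's dss
 * precedence.  With a "secondary" precedence the effective order is
 * retained -> mmap -> sbrk, whereas a "primary" precedence (selected, e.g.,
 * via MALLOC_CONF="dss:primary") flips it to retained -> sbrk -> mmap, in
 * either case only when the platform provides sbrk(2) at all (have_dss).
 */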

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	bool commit;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
	    &commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	assert(commit);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
	    false, true);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;
	arena_t *arena;

	arena = chunk_arena_get(arena_ind);
	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
	    arena->ind);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}
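
/*
 * Illustrative sketch (hypothetical, not part of the allocator): a replacement
 * alloc hook called through chunk_hooks->alloc above must return memory of at
 * least the requested size and alignment, honor the in/out *zero and *commit
 * flags, and return NULL on failure.  A thin wrapper that vetoes
 * address-targeted requests and otherwise delegates to previously captured
 * hooks (old_hooks would have been read via the "arena.<i>.chunk_hooks"
 * mallctl before being replaced) might look like:
 *
 *	static chunk_hooks_t old_hooks;
 *
 *	static void *
 *	my_chunk_alloc(void *new_addr, size_t size, size_t alignment,
 *	    bool *zero, bool *commit, unsigned arena_ind)
 *	{
 *
 *		if (new_addr != NULL)
 *			return (NULL);
 *		return (old_hooks.alloc(new_addr, size, alignment, zero,
 *		    commit, arena_ind));
 *	}
 */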

static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(arena, chunk_hooks, chunk,
				    size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(arena, prev);
	}

label_return:
	malloc_mutex_unlock(&arena->chunks_mtx);
}
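
/*
 * Illustrative diagram (not part of the allocator): chunk_record() coalesces a
 * freed chunk with its address-space neighbors whenever the merge hook allows
 * it and the committed state matches, so adjacent free extents collapse into a
 * single node:
 *
 *	before:  [ prev ][ chunk being recorded ][ node ]
 *	after:   [             single extent            ]
 *
 * Forward coalescing reuses the following node in place; backward coalescing
 * frees the previous node once its range has been absorbed.
 */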

void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
	arena_maybe_purge(arena);
}

void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	/* Try to deallocate. */
	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	chunk_hooks->dalloc(chunk, size, committed, arena->ind);
	if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
	    length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
		return (true);

	return (false);
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(nelms *
	    sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}
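
/*
 * Illustrative example (values depend on the build configuration): with
 * opt_lg_chunk = 21 and 4 KiB pages (LG_PAGE = 12), the settings derived above
 * are chunksize = 2 MiB, chunksize_mask = 0x1fffff, and chunk_npages = 512.
 * On a 64-bit build (LG_SIZEOF_PTR = 3) the rtree then covers
 * 64 - 21 = 43 bits of chunk-aligned address space.
 */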

void
chunk_prefork(void)
{

	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}