/* xref: /redis-3.2.3/deps/jemalloc/src/huge.c (revision 5268379e) */
#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

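/* Look up the extent node that tracks the huge allocation at ptr. */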
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

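/* Associate ptr with the extent node that tracks it; returns true on failure. */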
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

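/* Remove the ptr -> node association established by huge_node_set(). */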
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

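/*
 * Allocate a huge object of at least size bytes, rounding the request up to a
 * usable size and delegating to huge_palloc() with chunk alignment.
 */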
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache)
{
	size_t usize;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (NULL);
	}

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

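/*
 * Allocate an aligned huge object backed by one or more contiguous chunks and
 * insert it into the owning arena's list of huge allocations.
 */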
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0))
		return (NULL);
	assert(usize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    size, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, size, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, size);
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, size);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, size);

	return (ret);
}

#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
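/*
 * Junk-fill a huge allocation that is being freed, but only when the memory
 * will not simply be unmapped.
 */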
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

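/*
 * Resize in place when the old and new sizes map onto the same chunk-aligned
 * extent; only fill/purge and size bookkeeping are required.
 */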
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

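/*
 * Shrink a huge allocation in place by splitting off and discarding the
 * excess trailing chunks.
 */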
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}

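/*
 * Grow a huge allocation in place by extending its chunk range; returns true
 * if the required chunks cannot be obtained.
 */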
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	     &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

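/*
 * Try to satisfy a reallocation without moving the object: expand in place,
 * reuse the existing chunks, or shrink in place.  Returns true if a move is
 * required.
 */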
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
			return (false);
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero))
			return (false);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
	return (true);
}

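/* Allocate the destination for a huge reallocation that has to move. */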
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}

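/*
 * Reallocate a huge object, first attempting an in-place resize and otherwise
 * falling back to allocate-copy-free.
 */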
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

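/*
 * Free a huge allocation: unregister it, unlink it from its arena, junk-fill
 * it if so configured, and return its chunks to the arena.
 */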
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true);
}

arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}

void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}