/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 ([email protected]).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been
 * reused for some other kind of object (which our subsystem's lock might
 * corrupt).
 *
 * Take rcu_read_lock() before reading the address, then rcu_read_unlock()
 * after taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
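
/*
 * A minimal sketch of the behaviour described above: a zero-byte request
 * returns ZERO_SIZE_PTR rather than NULL, so a bare NULL check does not
 * catch it:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);  // p == ZERO_SIZE_PTR
 *	if (ZERO_OR_NULL_PTR(p))
 *		return -ENOMEM;            // no usable memory behind p
 *	kfree(p);                          // would have been a safe no-op
 */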

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
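
/*
 * A minimal usage sketch (struct my_obj and my_obj_cache are illustrative
 * names, not part of this header):
 *
 *	struct my_obj {
 *		int id;
 *		struct list_head list;
 *	};
 *
 *	static struct kmem_cache *my_obj_cache;
 *
 *	my_obj_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	// With SLAB_PANIC set, failure panics, so no NULL check is needed.
 */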

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
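
/*
 * Sketch: whitelist only the user-visible field of a hypothetical
 * structure, keeping kernel-internal state off limits to
 * copy_to_user()/copy_from_user():
 *
 *	struct my_msg {
 *		spinlock_t lock;	// internal, not whitelisted
 *		char data[64];		// may cross the user boundary
 *	};
 *
 *	msg_cache = KMEM_CACHE_USERCOPY(my_msg, 0, data);
 */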

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);
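
/*
 * Sketch of the distinction drawn above (the exact bucket size depends on
 * the configured kmalloc caches):
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *	size_t sz = ksize(buf);   // likely 128: the bucket, not the request
 *	// Writing to buf[100..sz-1] may trip FORTIFY_SOURCE/UBSAN_BOUNDS;
 *	// grow the buffer with krealloc() instead.
 */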

#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * SLAB and SLUB directly allocate requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, if we want a byte-sized index
 * that can represent 2^8 entries, the object size must be greater than
 * or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on the byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
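
/*
 * A sketch of the resulting mapping (assuming CONFIG_ZONE_DMA and
 * CONFIG_MEMCG_KMEM are both enabled):
 *
 *	kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)     == KMALLOC_CGROUP
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | GFP_DMA)           == KMALLOC_DMA
 */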

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
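
/*
 * A few concrete values as a sketch (assuming KMALLOC_SHIFT_LOW == 3,
 * the SLUB default):
 *
 *	kmalloc_index(8)   == 3   // the 8-byte cache
 *	kmalloc_index(96)  == 1   // the dedicated 96-byte cache
 *	kmalloc_index(100) == 7   // rounds up to the 128-byte cache
 */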
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
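
/*
 * Usage sketch (cache is a hypothetical kmem_cache; on failure
 * kmem_cache_alloc_bulk() returns 0 and leaves nothing allocated):
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	// ... use objs[0..15] ...
 *	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
 */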

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;

#ifdef CONFIG_TRACING
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
		    __assume_kmalloc_alignment __alloc_size(3);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size) __assume_kmalloc_alignment
						__alloc_size(4);
#else /* CONFIG_TRACING */
/* Save a function call when CONFIG_TRACING=n */
static __always_inline __alloc_size(3)
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline __alloc_size(4)
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
					      __alloc_size(1);

void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
							     __alloc_size(1);

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size a power of two, the alignment is also guaranteed to be
 * at least @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually give up
 *	and fail.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
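
/*
 * Canonical usage sketch (struct foo is a hypothetical caller type;
 * always check for NULL before use):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	// ... use f ...
 *	kfree(f);
 */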

#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, flags, node);

		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmalloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
#else
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
		return kmalloc_large_node(size, flags, node);

	return __kmalloc_node(size, flags, node);
}
#endif

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
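
/*
 * Sketch: when the element count comes from an untrusted source, the
 * n * size multiplication must be overflow-checked, which is exactly
 * what kmalloc_array() provides (NULL covers the overflow case too):
 *
 *	struct item *tbl = kmalloc_array(n, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 */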

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
								      size_t new_n,
								      size_t new_size,
								      gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
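
/*
 * Growth sketch: on failure the original array is left intact, so assign
 * through a temporary (tbl and new_n are illustrative names):
 *
 *	struct item *bigger;
 *
 *	bigger = krealloc_array(tbl, new_n, sizeof(*tbl), GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;	// tbl is still valid and unchanged
 *	tbl = bigger;
 */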

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
				  unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

/*
 * kmalloc_track_caller is a special version of kmalloc that, for slab
 * leak tracking, records the caller of the routine that invoked it
 * rather than its own immediate caller.
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(size, flags) \
	__kmalloc_node_track_caller(size, flags, \
				    NUMA_NO_NODE, _RET_IP_)
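
/*
 * Sketch: a hypothetical mid-layer wrapper whose own callers should show
 * up in allocation tracking, rather than the wrapper itself:
 *
 *	void *my_subsys_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */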

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}
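
/*
 * Sketch: kvmalloc() tries a kmalloc() allocation first and falls back
 * to vmalloc() for larger sizes when that fails; the result must be
 * released with kvfree(), never plain kfree():
 *
 *	buf = kvmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	kvfree(buf);
 */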

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __realloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);
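
/*
 * Sketch following the kdoc above: size the buffer to the full bucket up
 * front rather than querying it afterwards with ksize():
 *
 *	size_t want = kmalloc_size_roundup(126);  // e.g. 128
 *	char *buf = kmalloc(want, GFP_KERNEL);    // all 'want' bytes usable
 */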

void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */