xref: /linux-6.15/include/linux/slab.h (revision d34599bc)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 ([email protected]).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * begin:
 *  rcu_read_lock();
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) { // might fail for free objects
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
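
/*
 * A minimal sketch of the ctor approach described above; "struct foo",
 * foo_ctor() and foo_cache are hypothetical names. The ctor runs when a
 * slab page is allocated, and ctor-initialized locks are never
 * re-initialized on object reuse, so they stay valid across
 * RCU-deferred recycling of the object:
 *
 *  struct foo {
 *      spinlock_t lock;
 *      unsigned long key;
 *  };
 *
 *  static void foo_ctor(void *addr)
 *  {
 *      struct foo *f = addr;
 *
 *      spin_lock_init(&f->lock);
 *  }
 *
 *  foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                SLAB_TYPESAFE_BY_RCU, foo_ctor);
 */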
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance-critical caches; these should be very rare, agreed upon
 *   with the slab maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
#else
#define SLAB_SKIP_KFENCE	0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#else
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
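
/*
 * Illustrative only: a zero-byte request yields ZERO_SIZE_PTR, which is
 * non-NULL (so it is not mistaken for allocation failure) but must never
 * be dereferenced, and may safely be passed to kfree():
 *
 *  void *p = kmalloc(0, GFP_KERNEL);  // p == ZERO_SIZE_PTR, not NULL
 *
 *  BUG_ON(!ZERO_OR_NULL_PTR(p));
 *  kfree(p);                          // no-op, same as kfree(NULL)
 */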

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
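
/*
 * A usage sketch ("struct my_obj" and my_cache are hypothetical):
 *
 *  struct my_obj {
 *      int id;
 *  } ____cacheline_aligned_in_smp;
 *
 *  static struct kmem_cache *my_cache;
 *
 *  my_cache = KMEM_CACHE(my_obj, SLAB_PANIC);
 *
 * which expands to:
 *
 *  kmem_cache_create("my_obj", sizeof(struct my_obj),
 *                    __alignof__(struct my_obj), SLAB_PANIC, NULL);
 */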

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
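
/*
 * A sketch of whitelisting a single field ("struct session" and its
 * members are hypothetical). Hardened usercopy will then permit
 * copies to/from userspace only within the @data region of each
 * object, rejecting accesses that would touch @lock:
 *
 *  struct session {
 *      spinlock_t lock;
 *      char data[64];
 *  };
 *
 *  cache = KMEM_CACHE_USERCOPY(session, 0, data);
 */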

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
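
/*
 * The DEFINE_FREE() above enables scope-based freeing via __free(kfree)
 * from <linux/cleanup.h>. A minimal sketch (do_setup() and setup_ok()
 * are hypothetical); the buffer is kfree()d automatically on every
 * return path once it goes out of scope:
 *
 *  static int do_setup(void)
 *  {
 *      void *buf __free(kfree) = kmalloc(64, GFP_KERNEL);
 *
 *      if (!buf)
 *          return -ENOMEM;
 *      if (!setup_ok(buf))
 *          return -EINVAL;  // buf freed here
 *      return 0;            // ... and here
 *  }
 */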

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * SLAB and SLUB directly allocate requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up using the byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
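
/*
 * Worked examples of the mapping above, assuming CONFIG_ZONE_DMA,
 * CONFIG_MEMCG_KMEM and !CONFIG_SLUB_TINY (otherwise the aliased
 * enumerators above collapse the result back to KMALLOC_NORMAL):
 *
 *  kmalloc_type(GFP_KERNEL)                      == KMALLOC_NORMAL
 *  kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)      == KMALLOC_CGROUP
 *  kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)  == KMALLOC_RECLAIM
 *  kmalloc_type(GFP_ATOMIC | __GFP_DMA)          == KMALLOC_DMA
 */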

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
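
/*
 * A usage sketch (cache, NOBJS and handle_obj() are hypothetical).
 * With the current implementations this is all-or-nothing:
 * kmem_cache_alloc_bulk() returns @size on success or, after releasing
 * any partially allocated objects, 0 on failure:
 *
 *  void *objs[NOBJS];
 *  int i, n;
 *
 *  n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, NOBJS, objs);
 *  if (!n)
 *      return -ENOMEM;
 *  for (i = 0; i < n; i++)
 *      handle_obj(objs[i]);
 *  kmem_cache_free_bulk(cache, n, objs);
 */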

void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;

void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
		    __assume_kmalloc_alignment __alloc_size(3);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size) __assume_kmalloc_alignment
						__alloc_size(4);
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
					      __alloc_size(1);

void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
							     __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size a power of two, the alignment is also guaranteed to be
 * at least @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually
 *	give up and fail.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		index = kmalloc_index(size);
		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
	}
	return __kmalloc(size, flags);
}
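
/*
 * Canonical usage ("struct ctx" is hypothetical). Because sizeof(*c) is
 * a compile-time constant, the branch above resolves the cache index at
 * compile time via kmalloc_index():
 *
 *  struct ctx *c;
 *
 *  c = kmalloc(sizeof(*c), GFP_KERNEL);
 *  if (!c)
 *      return -ENOMEM;
 *  ...
 *  kfree(c);
 */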

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, flags, node);

		index = kmalloc_index(size);
		return kmalloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
								      size_t new_n,
								      size_t new_size,
								      gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
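
/*
 * Why these helpers exist: an open-coded kmalloc(n * size, ...) can
 * overflow and silently under-allocate. A sketch with @n taken from an
 * untrusted source ("struct entry" is hypothetical):
 *
 *  struct entry *tbl;
 *
 *  tbl = kcalloc(n, sizeof(*tbl), GFP_KERNEL); // NULL if n * size overflows
 *  if (!tbl)
 *      return -ENOMEM;
 */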

void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
				  unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

/*
 * kmalloc_track_caller is a special version of kmalloc that records, for
 * slab leak tracking, the caller of the routine that invokes it rather
 * than that routine itself.
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(size, flags) \
	__kmalloc_node_track_caller(size, flags, \
				    NUMA_NO_NODE, _RET_IP_)
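
/*
 * A sketch of the intended use (my_strdup() is hypothetical): with
 * plain kmalloc() every allocation would be attributed to my_strdup()
 * itself, whereas kmalloc_track_caller() attributes it to whoever
 * called my_strdup():
 *
 *  char *my_strdup(const char *s, gfp_t gfp)
 *  {
 *      size_t len = strlen(s) + 1;
 *      char *p = kmalloc_track_caller(len, gfp);
 *
 *      if (p)
 *          memcpy(p, s, len);
 *      return p;
 *  }
 */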

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __realloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
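
/*
 * kvmalloc() and friends try kmalloc() first and, where the flags allow
 * it, fall back to vmalloc() for larger sizes. The result is therefore
 * not guaranteed to be physically contiguous and must be freed with
 * kvfree(), never kfree(). A sketch (nr_items is hypothetical):
 *
 *  struct item *arr;
 *
 *  arr = kvcalloc(nr_items, sizeof(*arr), GFP_KERNEL);
 *  if (!arr)
 *      return -ENOMEM;
 *  ...
 *  kvfree(arr);
 */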

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);
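
/*
 * The recommended pattern from the comment above, with illustrative
 * sizes: round up first and allocate the whole bucket, rather than
 * querying ksize() for slack afterwards:
 *
 *  size_t want = 126;
 *  size_t alloc = kmalloc_size_roundup(want);  // 128 on typical configs
 *  char *buf = kmalloc(alloc, GFP_KERNEL);     // all "alloc" bytes usable
 */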

void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */