/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 ([email protected]).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

enum _slab_flag_bits {
	_SLAB_CONSISTENCY_CHECKS,
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_CACHE_DMA,
	_SLAB_CACHE_DMA32,
	_SLAB_STORE_USER,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
	_SLAB_DEBUG_OBJECTS,
#endif
	_SLAB_NOLEAKTRACE,
	_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
	_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
	_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
	_SLAB_KASAN,
#endif
	_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
	_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
	_SLAB_RECLAIM_ACCOUNT,
#endif
	_SLAB_OBJECT_POISON,
	_SLAB_CMPXCHG_DOUBLE,
#ifdef CONFIG_SLAB_OBJ_EXT
	_SLAB_NO_OBJ_EXT,
#endif
	_SLAB_FLAGS_LAST_BIT
};

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
/**
 * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
 *
 * Sufficiently large objects are aligned on cache line boundary. For object
 * size smaller than a half of cache line size, the alignment is on the half of
 * cache line size. In general, if object size is smaller than 1/2^n of cache
 * line size, the alignment is adjusted to 1/2^n.
 *
 * If explicit alignment is also requested by the respective
 * &struct kmem_cache_args field, the greater of the two alignments is applied.
 */
#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
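
/*
 * Example (illustrative sketch only; "struct foo" and foo_cachep are made-up
 * names, not part of this API): a cache whose objects are aligned to cache
 * line boundaries and whose creation failure should panic at boot:
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), NULL,
 *				       SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 */
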
/**
 * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * ::
 *
 *  begin:
 *   rcu_read_lock();
 *   obj = lockless_lookup(key);
 *   if (obj) {
 *     if (!try_get_ref(obj)) { // might fail for free objects
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *
 *     if (obj->key != key) { // not the object we expected
 *       put_ref(obj);
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *   }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that the object identity check has to be done *after* acquiring a
 * reference, therefore the user has to ensure proper ordering for loads.
 * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
 * the newly allocated object has to be fully initialized *before* its
 * refcount gets initialized and proper ordering for stores is required.
 * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
 * designed with the proper fences required for reference counting objects
 * allocated with SLAB_TYPESAFE_BY_RCU.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
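
/*
 * A minimal SLAB_TYPESAFE_BY_RCU ordering sketch (illustrative only; the
 * struct, foo_cachep, foo_lookup() and foo_put() are made-up names). The
 * writer publishes the refcount only after the object is fully initialized;
 * the reader treats the object as valid only after acquiring a reference and
 * re-checking its identity:
 *
 *	// writer
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	obj->key = key;				// initialize everything first
 *	refcount_set_release(&obj->ref, 1);	// then publish the refcount
 *
 *	// reader
 *	rcu_read_lock();
 *	obj = foo_lookup(key);
 *	if (obj && refcount_inc_not_zero_acquire(&obj->ref)) {
 *		if (obj->key != key) {		// recycled object, not ours
 *			foo_put(obj);
 *			obj = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 */
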
/* Trace allocations and frees */
#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches, should be very rare and consulted with slab
 *   maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
#endif
/**
 * define SLAB_ACCOUNT - Account allocations to memcg.
 *
 * All object allocations from this cache will be memcg accounted, regardless
 * of whether __GFP_ACCOUNT is passed to individual allocations.
 */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN		__SLAB_FLAG_UNUSED
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/**
 * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
 *
 * Use this flag for caches that have an associated shrinker. As a result, slab
 * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
 * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
 */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab created using create_boot_cache */
#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
#else
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
#endif

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
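
/*
 * For example (illustrative only), a zero sized request returns a pointer that
 * must not be dereferenced but may be freed normally:
 *
 *	p = kmalloc(0, GFP_KERNEL);	// may return ZERO_SIZE_PTR
 *	if (ZERO_OR_NULL_PTR(p))
 *		...			// nothing usable was allocated
 *	kfree(p);			// no-op for both NULL and ZERO_SIZE_PTR
 */
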
#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

/**
 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
 *
 * Any uninitialized fields of the structure are interpreted as unused. The
 * exception is @freeptr_offset, where %0 is a valid value, so
 * @use_freeptr_offset must also be set to %true in order to interpret the
 * field as used. For @useroffset %0 is also valid, but only with non-%0
 * @usersize.
 *
 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields unused.
 */
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset when @usersize is non-%0.
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free the object in
	 * the same state as after calling the constructor, or deal
	 * appropriately with any differences between a freshly constructed
	 * and a reallocated object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};

struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		    slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align	= align,
		.ctor	= ctor,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}
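
/*
 * Illustrative sketch (made-up names): passing less common arguments through
 * a designated-initializer &struct kmem_cache_args when creating a cache:
 *
 *	struct kmem_cache_args args = {
 *		.align		= __alignof__(struct foo),
 *		.useroffset	= offsetof(struct foo, name),
 *		.usersize	= sizeof_field(struct foo, name),
 *	};
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), &args,
 *				       SLAB_HWCACHE_ALIGN);
 */
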
/**
 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or %NULL.
 *
 * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args)
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
			   unsigned int align, slab_flags_t flags,
			   unsigned int useroffset, unsigned int usersize,
			   void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align		= align,
		.ctor		= ctor,
		.useroffset	= useroffset,
		.usersize	= usersize,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
			  struct kmem_cache_args *args,
			  slab_flags_t flags)
{
	struct kmem_cache_args kmem_default_args = {};

	/* Make sure we don't get passed garbage. */
	if (WARN_ON_ONCE(args))
		return ERR_PTR(-EINVAL);

	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}

/**
 * kmem_cache_create - Create a kmem cache.
 * @__name: A string which is used in /proc/slabinfo to identify this cache.
 * @__object_size: The size of objects to be created in this cache.
 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
 *	    means defaults will be used for all the arguments.
 *
 * This is currently implemented as a macro using ``_Generic()`` to call
 * either the new variant of the function, or a legacy one.
 *
 * The new variant has 4 parameters:
 * ``kmem_cache_create(name, object_size, args, flags)``
 *
 * See __kmem_cache_create_args() which implements this.
 *
 * The legacy variant has 5 parameters:
 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
 *
 * The align and ctor parameters map to the respective fields of
 * &struct kmem_cache_args
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
#define kmem_cache_create(__name, __object_size, __args, ...)			\
	_Generic((__args),							\
		struct kmem_cache_args *: __kmem_cache_create_args,		\
		void *: __kmem_cache_default_args,				\
		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)

void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align	= __alignof__(struct __struct), \
			}, (__flags))
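
/*
 * For example (illustrative only, "struct foo" and foo_cachep are made-up
 * names):
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_PANIC | SLAB_ACCOUNT);
 *
 * creates a cache named "foo" with object size sizeof(struct foo) and
 * alignment __alignof__(struct foo).
 */
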
/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)				\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),		\
			&(struct kmem_cache_args) {				\
				.align		= __alignof__(struct __struct),	\
				.useroffset	= offsetof(struct __struct, __field), \
				.usersize	= sizeof_field(struct __struct, __field), \
			}, (__flags))

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc_noprof(const void *objp, size_t new_size,
				    gfp_t flags) __realloc_size(2);
#define krealloc(...)			alloc_hooks(krealloc_noprof(__VA_ARGS__))

void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif
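
/*
 * Scope-based freeing sketch (illustrative only; foo_parse() is a made-up
 * function): the DEFINE_FREE(kfree, ...) definition above, together with
 * __free() from <linux/cleanup.h>, frees the buffer automatically when it
 * goes out of scope:
 *
 *	int foo_parse(const char *src, size_t len)
 *	{
 *		char *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		...
 *		return 0;	// buf is kfree()d on every return path
 *	}
 */
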
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up on using byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;
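
/*
 * Illustrative mapping from gfp flags to cache type (assuming CONFIG_ZONE_DMA
 * and CONFIG_MEMCG are enabled and CONFIG_RANDOM_KMALLOC_CACHES is disabled;
 * see kmalloc_type() below for the precise priority order):
 *
 *	kmalloc(sz, GFP_KERNEL)				-> KMALLOC_NORMAL
 *	kmalloc(sz, GFP_KERNEL | __GFP_ACCOUNT)		-> KMALLOC_CGROUP
 *	kmalloc(sz, GFP_KERNEL | __GFP_RECLAIMABLE)	-> KMALLOC_RECLAIM
 *	kmalloc(sz, GFP_KERNEL | __GFP_DMA)		-> KMALLOC_DMA
 */
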
static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)

#include <linux/alloc_tag.h>

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
			      gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...)		alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
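
/*
 * Illustrative allocate/free sketch (foo_cachep is a made-up cache created
 * earlier with kmem_cache_create() or KMEM_CACHE()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */
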
void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))

/**
 * kmem_cache_charge - memcg charge an already allocated slab memory
 * @objp: address of the slab object to memcg charge
 * @gfpflags: describe the allocation context
 *
 * kmem_cache_charge allows charging a slab object to the current memcg,
 * primarily in cases where charging at allocation time might not be possible
 * because the target memcg is not known (i.e. softirq context)
 *
 * The objp should be a pointer returned by the slab allocator functions like
 * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
 * behavior can be controlled through gfpflags parameter, which affects how the
 * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
 * that overcharging is requested instead of failure, but is not applied for the
 * internal metadata allocation.
 *
 * There are several cases where it will return true even if the charging was
 * not done, more specifically:
 *
 * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
 * 2. Already charged slab objects.
 * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
 *    without __GFP_ACCOUNT
 * 4. Allocating internal metadata has failed
 *
 * Return: true if charge was successful otherwise false.
 */
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);

kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset, unsigned int usersize,
				  void (*ctor)(void *));

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);

int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
				   int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
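
/*
 * Illustrative bulk allocation sketch (foo_cachep is a made-up cache, error
 * handling kept minimal); note that interrupts must be enabled here:
 *
 *	void *objs[16];
 *	int nr = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				       ARRAY_SIZE(objs), objs);
 *
 *	if (!nr)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cachep, nr, objs);
 */
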
/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
#define PASS_BUCKET_PARAM(_b)		(_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
#define PASS_BUCKET_PARAM(_b)		NULL
#endif

/*
 * The following functions are not to be used directly and are intended only
 * for internal use from kmalloc() and kmalloc_node()
 * with the exception of kunit tests
 */

void *__kmalloc_noprof(size_t size, gfp_t flags)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
				__assume_kmalloc_alignment __alloc_size(3);

void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
				  int node, size_t size)
				__assume_kmalloc_alignment __alloc_size(4);

void *__kmalloc_large_noprof(size_t size, gfp_t flags)
				__assume_page_alignment __alloc_size(1);

void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
				__assume_page_alignment __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of a power of two bytes, the alignment is also guaranteed
 * to be at least the size. For other sizes, the alignment is guaranteed to
 * be at least the largest power-of-two divisor of @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_noprof(size, flags);

		index = kmalloc_index(size);
		return __kmalloc_cache_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc_noprof(size, flags);
}
#define kmalloc(...)			alloc_hooks(kmalloc_noprof(__VA_ARGS__))

#define kmem_buckets_alloc(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))

static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_node_noprof(size, flags, node);

		index = kmalloc_index(size);
		return __kmalloc_cache_node_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...)		alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_noprof(bytes, flags);
	return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...)		alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * See krealloc_noprof() for further details.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...)		alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
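
/*
 * Illustrative sketch of overflow-safe array (re)allocation (made-up names);
 * on failure the original allocation is left untouched, so keep the old
 * pointer until the reallocation has succeeded:
 *
 *	struct foo *arr, *tmp;
 *
 *	arr = kmalloc_array(nr, sizeof(*arr), GFP_KERNEL);
 *	...
 *	tmp = krealloc_array(arr, new_nr, sizeof(*arr), GFP_KERNEL);
 *	if (!tmp) {
 *		kfree(arr);
 *		return -ENOMEM;
 *	}
 *	arr = tmp;
 */
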
/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)

void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...)		\
	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(...)	kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)

#define kmalloc_track_caller_noprof(...)	\
		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
								 int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node_noprof(bytes, flags, node);
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...)		alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))

#define kcalloc_node(_n, _size, _flags, _node)	\
	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)

/*
 * Shortcuts
 */
#define kmem_cache_zalloc(_k, _flags)	kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
	return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...)			alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_noprof(size, flags, node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
#define kvmalloc_node(...)		alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))

#define kvmalloc(_size, _flags)		kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags)	kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)		kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
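
/*
 * Illustrative sketch: kvmalloc() and friends may fall back to vmalloc() when
 * a physically contiguous allocation is not possible, so the result must be
 * freed with kvfree() ("table" and "size" are made-up names, assumed to be
 * validated by the caller):
 *
 *	table = kvzalloc(size, GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */
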
static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc_node_noprof(bytes, flags, node);
}

#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)

#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

void __init kmem_cache_init_late(void);
void __init kvfree_rcu_init(void);

#endif	/* _LINUX_SLAB_H */