/*
 * Written by Mark Hemment, 1996 ([email protected]).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if slab debugging is enabled
 * (CONFIG_DEBUG_SLAB for SLAB, CONFIG_SLUB_DEBUG for SLUB).
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures that the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator's grouping of pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kmemleak.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
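/*
 * Illustrative sketch of the intended usage. The "foo" structure, the cache
 * pointer and the init function below are hypothetical names, not part of
 * this header:
 *
 *	struct foo {
 *		int id;
 *		struct list_head link;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		struct foo *f;
 *
 *		foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 *		f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *		if (!f)
 *			return -ENOMEM;
 *		...
 *		kmem_cache_free(foo_cache, f);
 *		return 0;
 *	}
 *
 * SLAB_PANIC makes cache creation failure fatal, so the returned pointer
 * need not be checked against NULL; without it, verify foo_cache before
 * first use.
 */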
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocator is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB allocates requests of up to two pages (order 1) directly and
 * otherwise passes the request to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all page-size and larger requests to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_MAX	30
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
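/*
 * Worked example (an assumption for illustration only: a 4 KiB page,
 * i.e. PAGE_SHIFT = 12, and MAX_ORDER = 11):
 *
 *   SLUB: KMALLOC_SHIFT_HIGH = 13 -> KMALLOC_MAX_CACHE_SIZE = 8 KiB,
 *         KMALLOC_SHIFT_MAX  = 23 -> KMALLOC_MAX_SIZE = 8 MiB,
 *         KMALLOC_MAX_ORDER  = 11.
 *   SLAB: KMALLOC_SHIFT_HIGH = KMALLOC_SHIFT_MAX = 22
 *         -> both KMALLOC_MAX_CACHE_SIZE and KMALLOC_MAX_SIZE are 4 MiB,
 *         KMALLOC_MAX_ORDER  = 10.
 *
 * In the inline kmalloc() below, constant-size requests above
 * KMALLOC_MAX_CACHE_SIZE bypass the kmalloc caches and go to the page
 * allocator via kmalloc_large().
 */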
/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 * (a worked example follows below)
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
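/*
 * Worked example for kmalloc_index() (illustrative only, assuming
 * KMALLOC_MIN_SIZE == 8, i.e. KMALLOC_SHIFT_LOW == 3):
 *
 *   kmalloc_index(8)    == 3  -> the    8-byte cache
 *   kmalloc_index(24)   == 5  -> the   32-byte cache
 *   kmalloc_index(96)   == 1  -> the   96-byte cache
 *   kmalloc_index(100)  == 7  -> the  128-byte cache
 *   kmalloc_index(192)  == 2  -> the  192-byte cache
 *   kmalloc_index(4096) == 12 -> the 4096-byte cache
 *
 * The index selects the matching entry in kmalloc_caches[], so e.g. a
 * constant-size kmalloc(100, GFP_KERNEL) is served from the 128-byte cache.
 */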
void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}

/*
 * Determine the size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
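/*
 * Typical usage sketch for the functions above (the "buf" and "nbytes"
 * names are hypothetical):
 *
 *	char *buf = kmalloc(nbytes, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * Note that a zero-byte request returns ZERO_SIZE_PTR rather than NULL,
 * which is non-NULL but must not be dereferenced; kfree() accepts it, and
 * ZERO_OR_NULL_PTR() tests for both cases. In atomic context (e.g. inside
 * an interrupt handler or under a spinlock) use GFP_ATOMIC instead of
 * GFP_KERNEL.
 */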
/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is one pointer. The runtime cost while enabled is larger than it
 * would be if this were bundled into kmem_cache: we need an extra pointer
 * chase. But the trade-off clearly lies in favor of not penalizing
 * non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}
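/*
 * The n > SIZE_MAX / size check above guards against multiplication
 * overflow. A hypothetical worked example on a 64-bit build: with
 * size == 8 and n == SIZE_MAX / 8 + 1, the product n * size wraps around
 * to 0, so a plain __kmalloc(n * size, ...) would quietly request zero
 * bytes instead of the huge array; kmalloc_array() detects the overflow
 * and returns NULL (kcalloc(), defined next in terms of it, inherits the
 * same protection).
 */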
/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */