/*
 * Written by Mark Hemment, 1996 ([email protected]).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree() in the same way that NULL can.
 * Both make kfree() a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
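/*
 * For illustration, a minimal sketch of the intended calling pattern
 * (ptr and size are hypothetical names, not part of this API):
 *
 *  ptr = kmalloc(size, GFP_KERNEL); // size == 0 yields ZERO_SIZE_PTR
 *  if (ZERO_OR_NULL_PTR(ptr))
 *    return;        // failed, or nothing usable was allocated
 *  ...use ptr...
 *  kfree(ptr);      // a no-op for both NULL and ZERO_SIZE_PTR
 */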
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
#ifdef CONFIG_SLOB
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};
#endif

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
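/*
 * A minimal usage sketch (struct foo and foo_cachep are hypothetical
 * names used only for illustration):
 *
 *  struct foo {
 *    int bar;
 *  } ____cacheline_aligned_in_smp;
 *
 *  static struct kmem_cache *foo_cachep;
 *
 *  foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * which expands to
 *
 *  kmem_cache_create("foo", sizeof(struct foo),
 *                    __alignof__(struct foo),
 *                    SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 *
 * Objects then come and go via kmem_cache_alloc(foo_cachep, GFP_KERNEL)
 * and kmem_cache_free(foo_cachep, obj). SLAB_PANIC spares the caller a
 * NULL check on cache creation.
 */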
/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64-bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel RAM. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
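/*
 * A minimal sketch of the overflow-safe pattern these helpers enable
 * (nr and entries are hypothetical names):
 *
 *  entries = kmalloc_array(nr, sizeof(*entries), GFP_KERNEL);
 *  if (!entries)   // NULL on failure _and_ on nr * size overflow
 *    return -ENOMEM;
 *
 * An open-coded kmalloc(nr * sizeof(*entries), GFP_KERNEL) silently
 * wraps around when nr is large (e.g. taken from userspace) and returns
 * a buffer smaller than the caller believes it has. kcalloc() is the
 * same pattern with the memory additionally zeroed.
 */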
#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */
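/*
 * A sketch of where this matters (my_strdup is a hypothetical helper,
 * modelled on kstrdup()): without the _track_caller variant, every
 * allocation made through such a wrapper would be attributed to the
 * wrapper itself in slab leak reports.
 *
 *  char *my_strdup(const char *s, gfp_t gfp)
 *  {
 *    size_t len = strlen(s) + 1;
 *    char *buf = kmalloc_track_caller(len, gfp);
 *
 *    if (buf)
 *      memcpy(buf, s, len);
 *    return buf;   // leak reports now blame my_strdup's caller
 *  }
 */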
/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLAB_H */