#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/kasan.h>
#include <linux/slab.h>
#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

#if defined(CONFIG_KASAN)
/*
 * Eagerly free the iovec owned by a to-be-cached object so KASAN can
 * catch any stale access through a recycled cache entry. The owner is
 * expected to reallocate the iovec on next use.
 */
static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
{
	kfree(*iov);
	*iov = NULL;
	*nr = 0;
}
#else
static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
{
}
#endif

/*
 * Returns true if the entry was stashed in the cache, false if the
 * caller must free it (cache full, or KASAN rejected the object).
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}

static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		/*
		 * If KASAN is enabled, always clear the initial bytes that
		 * must be zeroed post alloc, in case any of them overlap
		 * with KASAN's storage.
		 */
#if defined(CONFIG_KASAN)
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		if (cache->init_clear)
			memset(entry, 0, cache->init_clear);
#endif
		return entry;
	}

	return NULL;
}

/* Grab a cached entry if one is available, else allocate a fresh one. */
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj;

	obj = io_alloc_cache_get(cache);
	if (obj)
		return obj;

	obj = kmalloc(cache->elem_size, gfp);
	if (obj && cache->init_clear)
		memset(obj, 0, cache->init_clear);
	return obj;
}

/* Returns false if the cache was initialized properly, true on failure. */
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, unsigned int size,
				       unsigned int init_bytes)
{
	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
	if (cache->entries) {
		cache->nr_cached = 0;
		cache->max_cached = max_nr;
		cache->elem_size = size;
		cache->init_clear = init_bytes;
		return false;
	}
	return true;
}

static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(const void *))
{
	void *entry;

	if (!cache->entries)
		return;

	while ((entry = io_alloc_cache_get(cache)) != NULL)
		free(entry);

	kvfree(cache->entries);
	cache->entries = NULL;
}
#endif /* IOU_ALLOC_CACHE_H */
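
/*
 * Example lifecycle (a minimal illustrative sketch, not part of this
 * header: "struct my_obj" and the my_* helpers are hypothetical names).
 * It shows init -> alloc -> put -> free, with init_bytes chosen so only
 * the leading fields are cleared on a fresh allocation (and, under
 * KASAN, on every cache hit). Note the cache has no internal locking;
 * callers must serialize access, as io_uring does under its ctx lock.
 *
 *	struct my_obj {
 *		int	state;		/- cleared: below init_clear -/
 *		char	scratch[64];	/- may hold stale data on reuse -/
 *	};
 *
 *	static struct io_alloc_cache my_cache;
 *
 *	static int my_init(void)
 *	{
 *		/- a true return means the entries array allocation failed -/
 *		if (io_alloc_cache_init(&my_cache, IO_ALLOC_CACHE_MAX,
 *					sizeof(struct my_obj),
 *					offsetof(struct my_obj, scratch)))
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static struct my_obj *my_alloc(void)
 *	{
 *		return io_cache_alloc(&my_cache, GFP_KERNEL);
 *	}
 *
 *	static void my_release(struct my_obj *obj)
 *	{
 *		/- cache full or KASAN rejected the object: free instead -/
 *		if (!io_alloc_cache_put(&my_cache, obj))
 *			kfree(obj);
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		io_alloc_cache_free(&my_cache, kfree);
 *	}
 *
 * (Inner comments use /- -/ above only so this example can live inside
 * one block comment.)
 */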
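
/*
 * Example use of io_alloc_cache_kasan(), again a hypothetical sketch
 * ("struct my_async" and my_cache_put() are illustrative names, loosely
 * modeled on io_uring's cached async msghdr). When a cached object owns
 * a separately allocated iovec, release the iovec before caching the
 * object: under KASAN this frees it eagerly so any stale access through
 * a recycled entry is caught, while with KASAN disabled the helper is a
 * no-op and the iovec stays cached alongside its owner.
 *
 *	struct my_async {
 *		struct iovec	*free_iov;
 *		int		free_iov_nr;
 *	};
 *
 *	static void my_cache_put(struct io_alloc_cache *cache,
 *				 struct my_async *hdr)
 *	{
 *		io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
 *		if (!io_alloc_cache_put(cache, hdr)) {
 *			kfree(hdr->free_iov);	(NULL under KASAN)
 *			kfree(hdr);
 *		}
 *	}
 */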