#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node		node;
};

/*
 * Try to stash @entry in the cache. Returns false if the cache is already
 * at max_cached entries, in which case the caller must free the object.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		/* KASAN poisons object */
		kasan_slab_free_mempool(entry);
		return true;
	}
	return false;
}

/* Pop the most recently cached entry, or NULL if the cache is empty. */
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_unpoison_range(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}

static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
}

/* Drain the cache, invoking @free on each remaining entry. */
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
	cache->nr_cached = 0;
}
#endif
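
/*
 * Minimal usage sketch, assuming a hypothetical struct io_foo that embeds
 * struct io_cache_entry as its first member (so the elem_size bytes
 * unpoisoned from the entry pointer cover the whole object). io_foo,
 * io_foo_alloc() and io_foo_recycle() are illustrative names only; the
 * cache is assumed to have been set up with
 * io_alloc_cache_init(cache, max, sizeof(struct io_foo)).
 * Kept under #if 0 so it is never compiled.
 */
#if 0
struct io_foo {
	struct io_cache_entry	cache;
	int			payload;
};

static struct io_foo *io_foo_alloc(struct io_alloc_cache *cache)
{
	struct io_cache_entry *entry = io_alloc_cache_get(cache);

	/* Reuse a cached object when available, otherwise allocate fresh */
	if (entry)
		return container_of(entry, struct io_foo, cache);
	return kmalloc(sizeof(struct io_foo), GFP_KERNEL);
}

static void io_foo_recycle(struct io_alloc_cache *cache, struct io_foo *foo)
{
	/* Cache full: io_alloc_cache_put() returned false, free normally */
	if (!io_alloc_cache_put(cache, &foo->cache))
		kfree(foo);
}
#endif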