#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node node;
};

/*
 * Recycle @entry into the cache.  Returns false when the cache is already
 * full, in which case the caller is responsible for freeing the object.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		/* KASAN poisons object */
		kasan_slab_free_mempool(entry);
		return true;
	}
	return false;
}

/*
 * Pop the most recently cached entry, or NULL if the cache is empty.
 */
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_unpoison_range(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}

static inline void io_alloc_cache_init(struct io_alloc_cache *cache, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->elem_size = size;
}

/*
 * Drain the cache, invoking @free on every cached entry.
 */
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
					void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
	cache->nr_cached = 0;
}
#endif
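
/*
 * Usage sketch (illustrative only, not part of this header): a cached type
 * embeds struct io_cache_entry and converts back with container_of().  The
 * struct and function names below are hypothetical; only the io_alloc_cache_*
 * helpers above are real.
 *
 *	struct my_async_data {
 *		struct io_cache_entry cache;
 *		int payload;
 *	};
 *
 *	/- Allocate: try the cache first, fall back to kmalloc(). -/
 *	static struct my_async_data *my_data_alloc(struct io_alloc_cache *c)
 *	{
 *		struct io_cache_entry *entry = io_alloc_cache_get(c);
 *
 *		if (entry)
 *			return container_of(entry, struct my_async_data, cache);
 *		return kmalloc(sizeof(struct my_async_data), GFP_KERNEL);
 *	}
 *
 *	/- Release: recycle into the cache, kfree() only once the cache is full. -/
 *	static void my_data_release(struct io_alloc_cache *c, struct my_async_data *d)
 *	{
 *		if (!io_alloc_cache_put(c, &d->cache))
 *			kfree(d);
 *	}
 *
 *	/- Teardown callback for io_alloc_cache_free(c, my_data_cache_free). -/
 *	static void my_data_cache_free(struct io_cache_entry *entry)
 *	{
 *		kfree(container_of(entry, struct my_async_data, cache));
 *	}
 *
 * (Inner comments use /- -/ markers only so the enclosing block comment
 * does not terminate early.)
 */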