#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

void io_alloc_cache_free(struct io_alloc_cache *cache,
			 void (*free)(const void *));
bool io_alloc_cache_init(struct io_alloc_cache *cache,
			 unsigned max_nr, unsigned int size,
			 unsigned int init_bytes);

void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);

/*
 * Try to stash an entry in the cache. Returns true if the cache took it;
 * on false the caller still owns the entry and must free it itself.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}

/* Pop an entry from the cache, or return NULL if the cache is empty */
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		/*
		 * If KASAN is enabled, always clear the initial bytes that
		 * must be zeroed post alloc, in case any of them overlap
		 * with KASAN storage.
		 */
#if defined(CONFIG_KASAN)
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		if (cache->init_clear)
			memset(entry, 0, cache->init_clear);
#endif
		return entry;
	}

	return NULL;
}

/* Allocate an object, preferring a cached entry over a fresh allocation */
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj;

	obj = io_alloc_cache_get(cache);
	if (obj)
		return obj;
	return io_cache_alloc_new(cache, gfp);
}

io_cache_free(struct io_alloc_cache * cache,void * obj)62*0d83b8a9SCaleb Sander Mateos static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
63*0d83b8a9SCaleb Sander Mateos {
64*0d83b8a9SCaleb Sander Mateos if (!io_alloc_cache_put(cache, obj))
65*0d83b8a9SCaleb Sander Mateos kfree(obj);
66*0d83b8a9SCaleb Sander Mateos }
67*0d83b8a9SCaleb Sander Mateos
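/*
 * Minimal usage sketch (illustrative only; "struct foo" and the call sites
 * below are hypothetical, not part of this header). A cache is sized once
 * at setup, serviced through the inline helpers on the hot path, and torn
 * down with io_alloc_cache_free(); see alloc_cache.c for the init return
 * convention and error handling:
 *
 *	struct io_alloc_cache cache;
 *	struct foo *f;
 *
 *	io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX, sizeof(*f), 0);
 *
 *	f = io_cache_alloc(&cache, GFP_KERNEL);
 *	if (f) {
 *		...
 *		io_cache_free(&cache, f);
 *	}
 *
 *	io_alloc_cache_free(&cache, kfree);
 */
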
#endif