#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512

struct io_cache_entry {
	struct io_wq_work_node node;
};

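/*
 * Cached objects are expected to embed a struct io_cache_entry (typically
 * as their first member, so the KASAN poisoning below covers the whole
 * object) and to be recovered from it with container_of().  A minimal
 * usage sketch, with a hypothetical io_foo type and foo_cache instance:
 *
 *	struct io_foo {
 *		struct io_cache_entry	cache;
 *		...
 *	};
 *
 *	entry = io_alloc_cache_get(&ctx->foo_cache);
 *	if (entry)
 *		foo = container_of(entry, struct io_foo, cache);
 *	else
 *		foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 */

/*
 * Try to stash @entry in the cache.  Returns true if it was cached (the
 * object is KASAN-poisoned while it sits in the cache), false if the
 * cache is full and the caller must free the object itself.
 */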
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		/* KASAN poisons object */
		kasan_slab_free_mempool(entry);
		return true;
	}
	return false;
}

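/* Returns true if the cache currently holds no entries. */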
static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
{
	return !cache->list.next;
}

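/*
 * Pop the most recently cached entry, or return NULL if the cache is
 * empty.  Entries are poisoned while cached, so unpoison the object
 * before handing it back to the caller.
 */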
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_unpoison_range(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}

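/*
 * Initialise an empty cache that holds at most @max_nr entries of
 * @size bytes each.
 */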
static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
}

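/* Drain the cache, calling @free on every cached entry. */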
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
	cache->nr_cached = 0;
}
#endif