/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
/* Report a poison mismatch, dumping the bytes around the bad offset. */
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));

	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}
static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	/* Element is intact; mark it as in use before handing it out. */
	memset(obj, POISON_INUSE, size);
}
static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}
static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_free(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_alloc(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_krealloc(element, (size_t)pool->pool_data);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	check_element(pool, element);
	kasan_unpoison_element(pool, element);
	return element;
}
/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
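/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built;
 * the names io_request, io_request_cache and io_request_pool below are
 * hypothetical): a driver that must make forward progress under memory
 * pressure can reserve a few objects up front at init time.  Relies on
 * <linux/slab.h> and <linux/mempool.h>, already included above.
 */
#if 0
struct io_request {
	struct list_head list;
	char payload[64];
};

static struct kmem_cache *io_request_cache;
static mempool_t *io_request_pool;

static int __init io_request_pool_init(void)
{
	io_request_cache = kmem_cache_create("io_request",
					     sizeof(struct io_request),
					     0, 0, NULL);
	if (!io_request_cache)
		return -ENOMEM;

	/* Keep at least 4 requests allocatable even under extreme VM load. */
	io_request_pool = mempool_create(4, mempool_alloc_slab,
					 mempool_free_slab, io_request_cache);
	if (!io_request_pool) {
		kmem_cache_destroy(io_request_cache);
		return -ENOMEM;
	}
	return 0;
}
#endif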
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
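/*
 * Sketch (hypothetical, not built): when a pool's consumers run on a known
 * NUMA node, the pool bookkeeping can be placed there explicitly.  Note
 * that node_id only steers the kzalloc_node()/kmalloc_node() calls above;
 * the preallocated elements themselves come from alloc_fn.
 */
#if 0
static mempool_t *node_local_pool;

static int node_pool_init(struct kmem_cache *cache, int nid)
{
	node_local_pool = mempool_create_node(8, mempool_alloc_slab,
					      mempool_free_slab, cache,
					      GFP_KERNEL, nid);
	return node_local_pool ? 0 : -ENOMEM;
}
#endif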
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		/* Shrink: drop the excess reserved elements. */
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
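/*
 * Sketch (hypothetical, not built): a driver might grow its reserve as
 * devices are attached and shrink it again on detach, here keeping two
 * elements per active device.
 */
#if 0
static int adjust_pool_for_devices(mempool_t *pool, int nr_devices)
{
	/* May sleep; must not race with mempool_destroy() of @pool. */
	return mempool_resize(pool, 2 * nr_devices);
}
#endif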
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
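/*
 * Sketch (hypothetical, not built; reuses the io_request_pool from the
 * sketch after mempool_create() above): an I/O submission path allocating
 * from a pool.  With GFP_NOIO the call may still sleep waiting for a freed
 * element, but it will not recurse into I/O, and from process context it
 * cannot fail.
 */
#if 0
static struct io_request *get_io_request(void)
{
	/* Never returns NULL here: GFP_NOIO includes __GFP_WAIT. */
	return mempool_alloc(io_request_pool, GFP_NOIO);
}
#endif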
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
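/*
 * Sketch (hypothetical, not built): the completion path paired with
 * get_io_request() above.  If the pool is below min_nr, the element
 * refills the reserve and wakes any waiter in mempool_alloc(); otherwise
 * it goes straight back to the underlying allocator.
 */
#if 0
static void put_io_request(struct io_request *req)
{
	mempool_free(req, io_request_pool);
}
#endif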
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
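/*
 * Sketch (not built): these two are commonly paired via the
 * mempool_create_slab_pool() helper in <linux/mempool.h>, which passes
 * the kmem_cache as pool_data.
 */
#if 0
static mempool_t *make_slab_pool(int min_nr, struct kmem_cache *cache)
{
	/* Equivalent to mempool_create(min_nr, mempool_alloc_slab,
	 * mempool_free_slab, cache); */
	return mempool_create_slab_pool(min_nr, cache);
}
#endif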
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
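/*
 * Sketch (not built): for fixed-size buffers there is a
 * mempool_create_kmalloc_pool() helper in <linux/mempool.h> that encodes
 * the size into pool_data, which is what mempool_kmalloc() casts back out
 * above.  The counts and sizes below are illustrative.
 */
#if 0
static mempool_t *make_buffer_pool(void)
{
	/* Reserve 4 buffers of 256 bytes each. */
	return mempool_create_kmalloc_pool(4, 256);
}
#endif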
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
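/*
 * Sketch (not built): mempool_create_page_pool() in <linux/mempool.h>
 * builds on the two functions above, storing the page order in pool_data.
 * Pool elements are struct page pointers.  The order below is illustrative.
 */
#if 0
static mempool_t *make_page_pool(void)
{
	/* Reserve two order-0 (single) pages. */
	return mempool_create_page_pool(2, 0);
}
#endif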