/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

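/*
 * Example (illustrative sketch, not part of this file): a subsystem that
 * must make forward progress under memory pressure might keep a small
 * slab-backed pool around. The names "foo_cache", "foo_pool" and
 * FOO_POOL_MIN below are hypothetical:
 *
 *	foo_pool = mempool_create(FOO_POOL_MIN, mempool_alloc_slab,
 *				  mempool_free_slab, foo_cache);
 *	if (!foo_pool)
 *		return -ENOMEM;
 *
 * The pool then guarantees that at least FOO_POOL_MIN objects can always
 * be obtained via mempool_alloc() from process context.
 */
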
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

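/*
 * Example (illustrative sketch): a driver that wants its reserved elements
 * allocated near a particular device can pass that device's NUMA node.
 * "dev", "cache" and MIN_NR below are hypothetical:
 *
 *	pool = mempool_create_node(MIN_NR, mempool_alloc_slab,
 *				   mempool_free_slab, cache,
 *				   GFP_KERNEL, dev_to_node(dev));
 */
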
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

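/*
 * Example (illustrative sketch): a subsystem whose worst-case reservation
 * depends on a runtime tunable could adjust an existing pool when the
 * tunable changes. "foo_pool" and "max_requests" below are hypothetical:
 *
 *	if (mempool_resize(foo_pool, max_requests) < 0)
 *		return -ENOMEM;
 *
 * Shrinking frees the surplus elements immediately; growing may rely on
 * later mempool_free() calls to reach the new minimum.
 */
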
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

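/*
 * Example (illustrative sketch): in an I/O submission path an element is
 * taken from the pool and returned from the completion handler. GFP_NOIO
 * keeps the allocator from recursing into I/O. "struct foo" and "foo_pool"
 * below are hypothetical:
 *
 *	struct foo *f = mempool_alloc(foo_pool, GFP_NOIO);
 *	...
 *	mempool_free(f, foo_pool);
 *
 * Because GFP_NOIO includes __GFP_WAIT, this mempool_alloc() call keeps
 * retrying and never returns NULL.
 */
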
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

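/*
 * Note: for the common case above, <linux/mempool.h> provides the
 * mempool_create_slab_pool() shorthand, equivalent to calling
 * mempool_create() with mempool_alloc_slab/mempool_free_slab and the
 * kmem_cache as pool_data. "cache" and MIN_NR below are hypothetical:
 *
 *	pool = mempool_create_slab_pool(MIN_NR, cache);
 */
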
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

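/*
 * Note: <linux/mempool.h> also provides mempool_create_kmalloc_pool(),
 * where the object size is carried through pool_data as an integer.
 * "struct foo" and MIN_NR below are, again, placeholders:
 *
 *	pool = mempool_create_kmalloc_pool(MIN_NR, sizeof(struct foo));
 */
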
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
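
/*
 * Note: the matching <linux/mempool.h> shorthand is
 * mempool_create_page_pool(). For example, a pool of sixteen single
 * pages (order 0):
 *
 *	pool = mempool_create_page_pool(16, 0);
 *
 * Elements handed out by mempool_alloc() on such a pool are struct page
 * pointers, as returned by alloc_pages().
 */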