/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}
/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not
 * called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
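
/*
 * Example (illustrative sketch, not part of this file): the common
 * pattern is to back a pool with a kmem_cache via the slab helpers
 * defined later in this file. "struct foo", "foo_cache", "foo_pool"
 * and FOO_MIN_RESERVE are hypothetical names.
 *
 *	static struct kmem_cache *foo_cache;
 *	static mempool_t *foo_pool;
 *
 *	foo_cache = KMEM_CACHE(foo, 0);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *	foo_pool = mempool_create(FOO_MIN_RESERVE, mempool_alloc_slab,
 *				  mempool_free_slab, foo_cache);
 *	if (!foo_pool) {
 *		kmem_cache_destroy(foo_cache);
 *		return -ENOMEM;
 *	}
 *	...
 *	mempool_destroy(foo_pool);
 *	kmem_cache_destroy(foo_cache);
 */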
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
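
/*
 * Example (illustrative sketch): allocating the pool's bookkeeping
 * structures on a given NUMA node. Note the elements themselves still
 * come from alloc_fn(), which here does not take a node id. "nid" is a
 * hypothetical node id obtained by the caller.
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab,
 *				   mempool_free_slab, foo_cache,
 *				   GFP_KERNEL, nid);
 */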
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
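
/*
 * Example (illustrative sketch): growing the reserve when a device
 * reports a deeper queue; shrinking frees surplus elements immediately,
 * growing may complete lazily via later mempool_free() calls.
 * "foo_pool" and "new_depth" are hypothetical.
 *
 *	if (mempool_resize(foo_pool, new_depth))
 *		pr_warn("foo: could not grow reserve to %d\n", new_depth);
 */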
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 *
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
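
/*
 * Example (illustrative sketch): in the I/O submission path, GFP_NOIO
 * keeps the allocation from recursing into the I/O layer; the call may
 * sleep but will not fail. "foo_pool" is hypothetical.
 *
 *	struct foo *f = mempool_alloc(foo_pool, GFP_NOIO);
 *
 * From atomic context, pass a mask without __GFP_WAIT and be prepared
 * for NULL:
 *
 *	f = mempool_alloc(foo_pool, GFP_ATOMIC);
 *	if (!f)
 *		return -ENOMEM;
 */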
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
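
/*
 * Example (illustrative sketch): elements are typically returned from a
 * completion path, possibly in IRQ context; mempool_free() both refills
 * the reserve and wakes blocked mempool_alloc() callers. "foo_pool" and
 * foo_complete() are hypothetical.
 *
 *	static void foo_complete(struct foo *f)
 *	{
 *		mempool_free(f, foo_pool);
 *	}
 */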
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
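
/*
 * Example (illustrative sketch): a pool of fixed-size kmalloc buffers;
 * the buffer size is passed through @pool_data as an integer. The
 * reserve count of 8 and size of 512 bytes are arbitrary.
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)512);
 */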
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
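
/*
 * Example (illustrative sketch): a small reserve of order-0 pages,
 * e.g. for bounce buffers; @pool_data carries the page order. The
 * reserve count of 4 is arbitrary.
 *
 *	pool = mempool_create(4, mempool_alloc_pages, mempool_free_pages,
 *			      (void *)(long)0);
 */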