#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>
/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};
struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};
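/*
 * Illustrative note (editorial, not from the original source): each
 * dma_page heads one 'allocation'-sized coherent region, and the
 * trailing bitmap tracks one bit per block, set while the block is
 * free.  For example, with allocation == 4096 and size == 64,
 * blocks_per_page is 64, so the bitmap occupies a single long on a
 * 64-bit machine, and a set bit N means the block at byte offset
 * N * 64 is available.
 */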
#define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
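/*
 * Illustrative example (editorial sketch, not from the original source):
 * reading the resulting "pools" attribute might look like this for a
 * device with two hypothetical pools.  Columns are pool name, blocks in
 * use, total blocks, block size, and page count.
 *
 *	$ cat /sys/devices/.../pools
 *	poolinfo - 0.1
 *	buffer-32          12   128   32  1
 *	buffer-128          4    32  128  1
 */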
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);
	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return NULL;
	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);
	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
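/*
 * Usage sketch (editorial, not from the original source): a driver
 * typically creates one pool per fixed block size at probe time.  The
 * "struct xx_td" type and the names below are hypothetical.
 *
 *	struct dma_pool *td_pool;
 *
 *	td_pool = dma_pool_create("xx_td", &pdev->dev,
 *				  sizeof(struct xx_td), 16, 0);
 *	if (!td_pool)
 *		return -ENOMEM;
 */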
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	/* round the bitmap up to whole longs */
	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

 restart:
	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	/* pool->lock is held with IRQs off, so allocate atomically */
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
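/*
 * Usage sketch (editorial, not from the original source): each call
 * returns both a CPU pointer and, through the handle, the bus address
 * the device should use.  The names below are hypothetical.
 *
 *	dma_addr_t td_dma;
 *	struct xx_td *td;
 *
 *	td = dma_pool_alloc(td_pool, GFP_KERNEL, &td_dma);
 *	if (!td)
 *		return -ENOMEM;
 *	... point the device at td_dma, wait for it to finish ...
 *	dma_pool_free(td_pool, td, td_dma);
 */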
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
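/*
 * Worked example (editorial, not from the original source): the index
 * arithmetic above inverts the offset computation in dma_pool_alloc().
 * For a hypothetical pool with size == 64 and a block starting at
 * page->dma + 192:
 *
 *	block = 192 / 64 = 3
 *	map   = 3 / BITS_PER_LONG = 0
 *	block = 3 % BITS_PER_LONG = 3	(bit 3 of bitmap[0])
 */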
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
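/*
 * Usage sketch (editorial, not from the original source): with the
 * managed variant no explicit cleanup is needed; devres destroys the
 * pool when the (hypothetical) driver detaches.
 *
 *	pool = dmam_pool_create("xx_buf", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */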
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);