/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
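
/*
 * Example usage (illustrative sketch, not part of this file; "mydev" and
 * the sizes are hypothetical).  A driver typically creates one pool per
 * fixed-size descriptor type, usually at probe time:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev_desc", &mydev->dev,
 *			       64,	(block size in bytes)
 *			       8,	(each block aligned to 8 bytes)
 *			       4096);	(no block crosses a 4KB boundary)
 *	if (!pool)
 *		return -ENOMEM;
 */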

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
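
/*
 * Worked example of the chaining above (values illustrative): with
 * size = 512, boundary = 2048 and allocation = 4096, the loop stores the
 * offset of the next free block at the start of each free block, giving
 * the initial chain 0 -> 512 -> 1024 -> 2048 -> 2560 -> 3072 -> 4096,
 * where 4096 >= allocation terminates the list.  Note the ">=" test is
 * conservative: a block that would end exactly on a boundary (here 1536
 * and 3584) is also skipped, trading a little space for safety.
 */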

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
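
/*
 * Example (illustrative sketch; "pool" is the pool created above, and
 * error handling beyond the NULL check is the caller's):
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 * The CPU fills the block through cpu_addr; the device is handed "dma".
 */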

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
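
/*
 * Example (illustrative sketch, matching the allocation sketched above):
 *
 *	dma_pool_free(pool, cpu_addr, dma);
 *	...
 *	dma_pool_destroy(pool);
 *
 * dma_pool_destroy() may only run once every block has been freed and
 * nothing can touch the pool again, per the comments above.
 */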

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
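
/*
 * Example (illustrative sketch; "mydev" is hypothetical).  With the
 * managed variant, the error and remove paths need no explicit destroy;
 * devres tears the pool down on driver detach:
 *
 *	pool = dmam_pool_create("mydev_desc", &mydev->dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */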

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);