/* drivers/base/dmapool.c */

#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <asm/scatterlist.h>	/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {	/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct device		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
	struct list_head	pools;
};

struct dma_page {	/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned		in_use;
	unsigned long		bitmap [0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_FREED	0xa7	/* !inuse */
#define	POOL_POISON_ALLOCATED	0xa9	/* !initted */

static DECLARE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page		*page;
	struct dma_pool		*pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	down (&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, pages * pool->blocks_per_page,
				pool->size, pages);
		size -= temp;
		next += temp;
	}
	up (&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
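
/*
 * Illustrative (hypothetical) output of the "pools" attribute above,
 * following the scnprintf format: pool name, blocks in use, total
 * blocks, block size, and page count.  Names and numbers are made up:
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   16 2048  8
 *	buffer-512          4   32  512  4
 */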

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
	size_t size, size_t align, size_t allocation)
{
	struct dma_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		/* round size up to the next multiple of align */
		size += align - 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (dev) {
		down (&pools_lock);
		if (list_empty (&dev->dma_pools))
			device_create_file (dev, &dev_attr_pools);
		/* note:  not currently insisting "name" be unique */
		list_add (&retval->pools, &dev->dma_pools);
		up (&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}

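/*
 * Usage sketch (not part of this file): a driver might create a pool
 * of small descriptors at probe time.  The device pointer, names, and
 * sizes below are hypothetical.
 */
#if 0
static struct dma_pool *desc_pool;

static int mydrv_probe(struct device *dev)
{
	/* 64-byte blocks, 16-byte aligned, no boundary constraint */
	desc_pool = dma_pool_create("mydrv_desc", dev, 64, 16, 0);
	if (!desc_pool)
		return -ENOMEM;
	return 0;
}
#endif
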
static struct dma_page *
pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
{
	struct dma_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);
	page = (struct dma_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent (pool->dev,
					    pool->allocation,
					    &page->dma,
					    mem_flags);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
	down (&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->dma_pools))
		device_remove_file (pool->dev, &dev_attr_pools);
	up (&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct dma_page		*page;
		page = list_entry (pool->page_list.next,
				struct dma_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}

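/*
 * Usage sketch (not part of this file): the counterpart to the probe
 * example above; every block must already have been dma_pool_free()d.
 * "desc_pool" and mydrv_remove() are hypothetical.
 */
#if 0
static int mydrv_remove(struct device *dev)
{
	dma_pool_destroy(desc_pool);
	desc_pool = NULL;
	return 0;
}
#endif
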

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
		dma_addr_t *handle)
{
	unsigned long		flags;
	struct dma_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int		i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}

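/*
 * Usage sketch (not part of this file): allocate one block from the
 * hypothetical "desc_pool" above, hand its dma address to the device,
 * then return the block with dma_pool_free().
 */
#if 0
	dma_addr_t dma;
	void *desc;

	desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;
	/* ... point the controller at "dma", wait for it to finish ... */
	dma_pool_free(desc_pool, desc, dma);
#endif
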

static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct dma_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}


/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == NULL) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;
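	/*
	 * Worked example (hypothetical numbers): with 64-byte blocks and a
	 * dma address 0x900 bytes into the page, block = 0x900/64 = 36;
	 * with BITS_PER_LONG == 32 that is bit 4 of bitmap word 1.
	 */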

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}


EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);