/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 * DMA windows will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
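
/*
 * Typical usage (an illustrative sketch, not code taken from any platform):
 * the platform provides dma_needs_bounce() and registers each device that
 * needs bouncing; drivers then keep using the ordinary streaming DMA API.
 * The helper name and the pool sizes below are hypothetical.
 *
 *	// machine/bus code, e.g. when a device is added
 *	if (machine_needs_bouncing(dev))
 *		dmabounce_register_dev(dev, 512, 4096);
 *
 *	// ... and when it goes away
 *	dmabounce_unregister_dev(dev);
 */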

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void *ptr;
	size_t size;
	int direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void *safe;
	dma_addr_t safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long size;
	struct dma_pool *pool;
#ifdef STATS
	unsigned long allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool small;
	struct dmabounce_pool large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
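
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * once a device has been registered with dmabounce, drivers use the
 * standard streaming API unchanged; any copy to or from the safe buffer
 * happens inside dma_map_single()/dma_unmap_single():
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ... program the device with 'handle' and run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */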

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			"is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
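
/*
 * Usage sketch (an assumption about the caller, not code from this file):
 * both sync helpers above return nonzero when the address is not a bounced
 * mapping, letting the caller fall back to its normal cache maintenance:
 *
 *	if (!dmabounce_sync_for_cpu(dev, handle, off, size, dir))
 *		return;		// data already copied back from the safe buffer
 *	// otherwise perform the usual cache maintenance for this range
 */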

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");