dma-debug: use pr_* instead of printk(KERN_* ...)
[linux-2.6-block.git] / lib / dma-debug.c
f2f45e5f
JR
1/*
2 * Copyright (C) 2008 Advanced Micro Devices, Inc.
3 *
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
972aa45c 20#include <linux/scatterlist.h>
2d62ece1 21#include <linux/dma-mapping.h>
6c132d1b 22#include <linux/stacktrace.h>
f2f45e5f 23#include <linux/dma-debug.h>
30dfa90c 24#include <linux/spinlock.h>
788dcfa6 25#include <linux/debugfs.h>
8a6fc708 26#include <linux/uaccess.h>
2d62ece1 27#include <linux/device.h>
f2f45e5f 28#include <linux/types.h>
2d62ece1 29#include <linux/sched.h>
8a6fc708 30#include <linux/ctype.h>
f2f45e5f 31#include <linux/list.h>
6bf07871 32#include <linux/slab.h>
f2f45e5f 33
2e34bde1
JR
34#include <asm/sections.h>
35
30dfa90c
JR
36#define HASH_SIZE 1024ULL
37#define HASH_FN_SHIFT 13
38#define HASH_FN_MASK (HASH_SIZE - 1)
39
f2f45e5f
JR
40enum {
41 dma_debug_single,
42 dma_debug_page,
43 dma_debug_sg,
44 dma_debug_coherent,
45};
46
6c132d1b
DW
47#define DMA_DEBUG_STACKTRACE_ENTRIES 5
48
f2f45e5f
JR
49struct dma_debug_entry {
50 struct list_head list;
51 struct device *dev;
52 int type;
53 phys_addr_t paddr;
54 u64 dev_addr;
55 u64 size;
56 int direction;
57 int sg_call_ents;
58 int sg_mapped_ents;
6c132d1b
DW
59#ifdef CONFIG_STACKTRACE
60 struct stack_trace stacktrace;
61 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
62#endif
f2f45e5f
JR
63};
64
30dfa90c
JR
65struct hash_bucket {
66 struct list_head list;
67 spinlock_t lock;
2d62ece1 68} ____cacheline_aligned_in_smp;
30dfa90c
JR
69
70/* Hash list to save the allocated dma addresses */
71static struct hash_bucket dma_entry_hash[HASH_SIZE];
3b1e79ed
JR
72/* List of pre-allocated dma_debug_entry's */
73static LIST_HEAD(free_entries);
74/* Lock for the list above */
75static DEFINE_SPINLOCK(free_entries_lock);
76
77/* Global disable flag - will be set in case of an error */
78static bool global_disable __read_mostly;
79
788dcfa6
JR
80/* Global error count */
81static u32 error_count;
82
 83/* Global error show enable */
84static u32 show_all_errors __read_mostly;
85/* Number of errors to show */
86static u32 show_num_errors = 1;
87
3b1e79ed
JR
88static u32 num_free_entries;
89static u32 min_free_entries;
e6a1a89d 90static u32 nr_total_entries;
30dfa90c 91
59d3daaf
JR
92/* number of preallocated entries requested by kernel cmdline */
93static u32 req_entries;
94
788dcfa6
JR
 95/* debugfs dentries for the stuff above */
96static struct dentry *dma_debug_dent __read_mostly;
97static struct dentry *global_disable_dent __read_mostly;
98static struct dentry *error_count_dent __read_mostly;
99static struct dentry *show_all_errors_dent __read_mostly;
100static struct dentry *show_num_errors_dent __read_mostly;
101static struct dentry *num_free_entries_dent __read_mostly;
102static struct dentry *min_free_entries_dent __read_mostly;
8a6fc708 103static struct dentry *filter_dent __read_mostly;
788dcfa6 104
2e507d84
JR
105/* per-driver filter related state */
106
107#define NAME_MAX_LEN 64
108
109static char current_driver_name[NAME_MAX_LEN] __read_mostly;
110static struct device_driver *current_driver __read_mostly;
111
112static DEFINE_RWLOCK(driver_name_lock);
788dcfa6 113
2d62ece1
JR
114static const char *type2name[4] = { "single", "page",
 115 "scatter-gather", "coherent" };
116
117static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
118 "DMA_FROM_DEVICE", "DMA_NONE" };
119
ed888aef
JR
120/* little merge helper - remove it after the merge window */
121#ifndef BUS_NOTIFY_UNBOUND_DRIVER
122#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
123#endif
124
2d62ece1
JR
125/*
126 * The access to some variables in this macro is racy. We can't use atomic_t
 127 * here because all these variables are exported to debugfs. Some of them are
 128 * even writable. This is also the reason why a lock won't help much. But
 129 * anyway, the races are no big deal. Here is why:
130 *
131 * error_count: the addition is racy, but the worst thing that can happen is
132 * that we don't count some errors
133 * show_num_errors: the subtraction is racy. Also no big deal because in
 134 * the worst case this will result in one more warning in the
 135 * system log than the user configured. This variable is
 136 * writable via debugfs.
137 */
6c132d1b
DW
138static inline void dump_entry_trace(struct dma_debug_entry *entry)
139{
140#ifdef CONFIG_STACKTRACE
141 if (entry) {
e7ed70ee 142 pr_warning("Mapped at:\n");
6c132d1b
DW
143 print_stack_trace(&entry->stacktrace, 0);
144 }
145#endif
146}
147
2e507d84
JR
148static bool driver_filter(struct device *dev)
149{
150 /* driver filter off */
151 if (likely(!current_driver_name[0]))
152 return true;
153
154 /* driver filter on and initialized */
155 if (current_driver && dev->driver == current_driver)
156 return true;
157
158 /* driver filter on but not yet initialized */
159 if (!current_driver && current_driver_name[0]) {
160 struct device_driver *drv = get_driver(dev->driver);
161 unsigned long flags;
162 bool ret = false;
163
164 if (!drv)
165 return false;
166
167 /* lock to protect against change of current_driver_name */
168 read_lock_irqsave(&driver_name_lock, flags);
169
170 if (drv->name &&
8a6fc708
JR
171 strncmp(current_driver_name, drv->name,
172 NAME_MAX_LEN-1) == 0) {
2e507d84
JR
173 current_driver = drv;
174 ret = true;
175 }
176
177 read_unlock_irqrestore(&driver_name_lock, flags);
178 put_driver(drv);
179
180 return ret;
181 }
182
183 return false;
184}
185
6c132d1b 186#define err_printk(dev, entry, format, arg...) do { \
2d62ece1 187 error_count += 1; \
2e507d84
JR
188 if (driver_filter(dev) && \
189 (show_all_errors || show_num_errors > 0)) { \
2d62ece1
JR
190 WARN(1, "%s %s: " format, \
191 dev_driver_string(dev), \
192 dev_name(dev) , ## arg); \
6c132d1b 193 dump_entry_trace(entry); \
2d62ece1
JR
194 } \
195 if (!show_all_errors && show_num_errors > 0) \
196 show_num_errors -= 1; \
197 } while (0);
198
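/*
 * A minimal, hypothetical use of err_printk() (illustrative, not code
 * from this file):
 *
 *	if (size == 0)
 *		err_printk(dev, NULL,
 *			   "DMA-API: zero-size mapping requested\n");
 *
 * The macro bumps error_count, honours the driver filter and the
 * show_all_errors/show_num_errors knobs, prefixes the message with the
 * driver and device name inside a WARN(), and dumps the stored mapping
 * stack trace when a dma_debug_entry is passed instead of NULL.
 */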
30dfa90c
JR
199/*
200 * Hash related functions
201 *
202 * Every DMA-API request is saved into a struct dma_debug_entry. To
203 * have quick access to these structs they are stored into a hash.
204 */
205static int hash_fn(struct dma_debug_entry *entry)
206{
207 /*
208 * Hash function is based on the dma address.
 209 * We use bits 13-22 here as the index into the hash
210 */
211 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
212}
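/*
 * Worked example (illustrative): with HASH_SIZE 1024 and HASH_FN_SHIFT
 * 13 the index is built from bits 13-22 of the device address, so for a
 * hypothetical entry with
 *
 *	entry->dev_addr == 0x12345678
 *
 * hash_fn() returns (0x12345678 >> 13) & 1023 == 0x1a2.  All entries
 * whose device addresses share those bits land in the same bucket and
 * are told apart by hash_bucket_find() below.
 */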
213
214/*
215 * Request exclusive access to a hash bucket for a given dma_debug_entry.
216 */
217static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
218 unsigned long *flags)
219{
220 int idx = hash_fn(entry);
221 unsigned long __flags;
222
223 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
224 *flags = __flags;
225 return &dma_entry_hash[idx];
226}
227
228/*
229 * Give up exclusive access to the hash bucket
230 */
231static void put_hash_bucket(struct hash_bucket *bucket,
232 unsigned long *flags)
233{
234 unsigned long __flags = *flags;
235
236 spin_unlock_irqrestore(&bucket->lock, __flags);
237}
238
239/*
240 * Search a given entry in the hash bucket list
241 */
242static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
243 struct dma_debug_entry *ref)
244{
7caf6a49
JR
245 struct dma_debug_entry *entry, *ret = NULL;
246 int matches = 0, match_lvl, last_lvl = 0;
30dfa90c
JR
247
248 list_for_each_entry(entry, &bucket->list, list) {
7caf6a49
JR
249 if ((entry->dev_addr != ref->dev_addr) ||
250 (entry->dev != ref->dev))
251 continue;
252
253 /*
254 * Some drivers map the same physical address multiple
255 * times. Without a hardware IOMMU this results in the
256 * same device addresses being put into the dma-debug
257 * hash multiple times too. This can result in false
 258 * positives being reported. Therefore we implement a
259 * best-fit algorithm here which returns the entry from
260 * the hash which fits best to the reference value
261 * instead of the first-fit.
262 */
263 matches += 1;
264 match_lvl = 0;
265 entry->size == ref->size ? ++match_lvl : match_lvl;
266 entry->type == ref->type ? ++match_lvl : match_lvl;
267 entry->direction == ref->direction ? ++match_lvl : match_lvl;
268
269 if (match_lvl == 3) {
270 /* perfect-fit - return the result */
30dfa90c 271 return entry;
7caf6a49
JR
272 } else if (match_lvl > last_lvl) {
273 /*
 274 * We found an entry that fits better than the
275 * previous one
276 */
277 last_lvl = match_lvl;
278 ret = entry;
279 }
30dfa90c
JR
280 }
281
7caf6a49
JR
282 /*
283 * If we have multiple matches but no perfect-fit, just return
284 * NULL.
285 */
286 ret = (matches == 1) ? ret : NULL;
287
288 return ret;
30dfa90c
JR
289}
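/*
 * Illustrative example of the best-fit logic above: assume a device
 * mapped the same dev_addr twice, once as a 4096 byte DMA_TO_DEVICE
 * mapping and once as a 64 byte DMA_FROM_DEVICE mapping, so both
 * entries live in the same bucket.  An unmap reference for 64 bytes
 * with DMA_FROM_DEVICE reaches match_lvl 3 against the second entry
 * (size, type and direction all match) and is returned as a perfect
 * fit, while it only reaches match_lvl 1 against the first entry.
 */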
290
291/*
292 * Add an entry to a hash bucket
293 */
294static void hash_bucket_add(struct hash_bucket *bucket,
295 struct dma_debug_entry *entry)
296{
297 list_add_tail(&entry->list, &bucket->list);
298}
299
300/*
301 * Remove entry from a hash bucket list
302 */
303static void hash_bucket_del(struct dma_debug_entry *entry)
304{
305 list_del(&entry->list);
306}
307
ac26c18b
DW
308/*
309 * Dump mapping entries for debugging purposes
310 */
311void debug_dma_dump_mappings(struct device *dev)
312{
313 int idx;
314
315 for (idx = 0; idx < HASH_SIZE; idx++) {
316 struct hash_bucket *bucket = &dma_entry_hash[idx];
317 struct dma_debug_entry *entry;
318 unsigned long flags;
319
320 spin_lock_irqsave(&bucket->lock, flags);
321
322 list_for_each_entry(entry, &bucket->list, list) {
323 if (!dev || dev == entry->dev) {
324 dev_info(entry->dev,
325 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
326 type2name[entry->type], idx,
327 (unsigned long long)entry->paddr,
328 entry->dev_addr, entry->size,
329 dir2name[entry->direction]);
330 }
331 }
332
333 spin_unlock_irqrestore(&bucket->lock, flags);
334 }
335}
336EXPORT_SYMBOL(debug_dma_dump_mappings);
337
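/*
 * Hypothetical call site (not from this file): an IOMMU fault handler
 * can dump everything dma-debug knows about the faulting device with
 *
 *	debug_dma_dump_mappings(dev);
 *
 * or pass NULL to dump the mappings of every device.
 */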
30dfa90c
JR
338/*
339 * Wrapper function for adding an entry to the hash.
340 * This function takes care of locking itself.
341 */
342static void add_dma_entry(struct dma_debug_entry *entry)
343{
344 struct hash_bucket *bucket;
345 unsigned long flags;
346
347 bucket = get_hash_bucket(entry, &flags);
348 hash_bucket_add(bucket, entry);
349 put_hash_bucket(bucket, &flags);
350}
351
e6a1a89d
FT
352static struct dma_debug_entry *__dma_entry_alloc(void)
353{
354 struct dma_debug_entry *entry;
355
356 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
357 list_del(&entry->list);
358 memset(entry, 0, sizeof(*entry));
359
360 num_free_entries -= 1;
361 if (num_free_entries < min_free_entries)
362 min_free_entries = num_free_entries;
363
364 return entry;
365}
366
3b1e79ed
JR
 367/* struct dma_debug_entry allocator
368 *
369 * The next two functions implement the allocator for
370 * struct dma_debug_entries.
371 */
372static struct dma_debug_entry *dma_entry_alloc(void)
373{
374 struct dma_debug_entry *entry = NULL;
375 unsigned long flags;
376
377 spin_lock_irqsave(&free_entries_lock, flags);
378
379 if (list_empty(&free_entries)) {
e7ed70ee 380 pr_err("DMA-API: debugging out of memory - disabling\n");
3b1e79ed
JR
381 global_disable = true;
382 goto out;
383 }
384
e6a1a89d 385 entry = __dma_entry_alloc();
3b1e79ed 386
6c132d1b
DW
387#ifdef CONFIG_STACKTRACE
388 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
389 entry->stacktrace.entries = entry->st_entries;
390 entry->stacktrace.skip = 2;
391 save_stack_trace(&entry->stacktrace);
392#endif
3b1e79ed
JR
393
394out:
395 spin_unlock_irqrestore(&free_entries_lock, flags);
396
397 return entry;
398}
399
400static void dma_entry_free(struct dma_debug_entry *entry)
401{
402 unsigned long flags;
403
404 /*
405 * add to beginning of the list - this way the entries are
406 * more likely cache hot when they are reallocated.
407 */
408 spin_lock_irqsave(&free_entries_lock, flags);
409 list_add(&entry->list, &free_entries);
410 num_free_entries += 1;
411 spin_unlock_irqrestore(&free_entries_lock, flags);
412}
413
e6a1a89d
FT
414int dma_debug_resize_entries(u32 num_entries)
415{
416 int i, delta, ret = 0;
417 unsigned long flags;
418 struct dma_debug_entry *entry;
419 LIST_HEAD(tmp);
420
421 spin_lock_irqsave(&free_entries_lock, flags);
422
423 if (nr_total_entries < num_entries) {
424 delta = num_entries - nr_total_entries;
425
426 spin_unlock_irqrestore(&free_entries_lock, flags);
427
428 for (i = 0; i < delta; i++) {
429 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
430 if (!entry)
431 break;
432
433 list_add_tail(&entry->list, &tmp);
434 }
435
436 spin_lock_irqsave(&free_entries_lock, flags);
437
438 list_splice(&tmp, &free_entries);
439 nr_total_entries += i;
440 num_free_entries += i;
441 } else {
442 delta = nr_total_entries - num_entries;
443
444 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
445 entry = __dma_entry_alloc();
446 kfree(entry);
447 }
448
449 nr_total_entries -= i;
450 }
451
452 if (nr_total_entries != num_entries)
453 ret = 1;
454
455 spin_unlock_irqrestore(&free_entries_lock, flags);
456
457 return ret;
458}
459EXPORT_SYMBOL(dma_debug_resize_entries);
460
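/*
 * Hedged usage sketch (illustrative only): a backend such as swiotlb
 * that knows it will create more mappings than the boot-time
 * preallocation covers could grow the pool with something like
 *
 *	if (dma_debug_resize_entries(65536))
 *		pr_warning("DMA-API: could not resize debug entry pool\n");
 *
 * The function returns 0 when nr_total_entries matches the requested
 * count afterwards and 1 otherwise, e.g. when kzalloc() fails while
 * growing the free list.
 */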
6bf07871
JR
461/*
462 * DMA-API debugging init code
463 *
464 * The init code does two things:
465 * 1. Initialize core data structures
466 * 2. Preallocate a given number of dma_debug_entry structs
467 */
468
469static int prealloc_memory(u32 num_entries)
470{
471 struct dma_debug_entry *entry, *next_entry;
472 int i;
473
474 for (i = 0; i < num_entries; ++i) {
475 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
476 if (!entry)
477 goto out_err;
478
479 list_add_tail(&entry->list, &free_entries);
480 }
481
482 num_free_entries = num_entries;
483 min_free_entries = num_entries;
484
e7ed70ee 485 pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
6bf07871
JR
486
487 return 0;
488
489out_err:
490
491 list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
492 list_del(&entry->list);
493 kfree(entry);
494 }
495
496 return -ENOMEM;
497}
498
8a6fc708
JR
499static ssize_t filter_read(struct file *file, char __user *user_buf,
500 size_t count, loff_t *ppos)
501{
8a6fc708 502 char buf[NAME_MAX_LEN + 1];
c17e2cf7 503 unsigned long flags;
8a6fc708
JR
504 int len;
505
506 if (!current_driver_name[0])
507 return 0;
508
509 /*
510 * We can't copy to userspace directly because current_driver_name can
511 * only be read under the driver_name_lock with irqs disabled. So
512 * create a temporary copy first.
513 */
514 read_lock_irqsave(&driver_name_lock, flags);
515 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
516 read_unlock_irqrestore(&driver_name_lock, flags);
517
518 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
519}
520
521static ssize_t filter_write(struct file *file, const char __user *userbuf,
522 size_t count, loff_t *ppos)
523{
8a6fc708 524 char buf[NAME_MAX_LEN];
c17e2cf7
JR
525 unsigned long flags;
526 size_t len;
8a6fc708
JR
527 int i;
528
529 /*
530 * We can't copy from userspace directly. Access to
531 * current_driver_name is protected with a write_lock with irqs
532 * disabled. Since copy_from_user can fault and may sleep we
 533 * need to copy to a temporary buffer first
534 */
e7ed70ee 535 len = min(count, (size_t)(NAME_MAX_LEN - 1));
8a6fc708
JR
536 if (copy_from_user(buf, userbuf, len))
537 return -EFAULT;
538
539 buf[len] = 0;
540
541 write_lock_irqsave(&driver_name_lock, flags);
542
31232509
JR
543 /*
544 * Now handle the string we got from userspace very carefully.
8a6fc708
JR
545 * The rules are:
546 * - only use the first token we got
547 * - token delimiter is everything looking like a space
548 * character (' ', '\n', '\t' ...)
549 *
550 */
551 if (!isalnum(buf[0])) {
552 /*
31232509 553 * If the first character userspace gave us is not
8a6fc708
JR
554 * alphanumerical then assume the filter should be
555 * switched off.
556 */
557 if (current_driver_name[0])
e7ed70ee 558 pr_info("DMA-API: switching off dma-debug driver filter\n");
8a6fc708
JR
559 current_driver_name[0] = 0;
560 current_driver = NULL;
561 goto out_unlock;
562 }
563
564 /*
565 * Now parse out the first token and use it as the name for the
566 * driver to filter for.
567 */
568 for (i = 0; i < NAME_MAX_LEN; ++i) {
569 current_driver_name[i] = buf[i];
570 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
571 break;
572 }
573 current_driver_name[i] = 0;
574 current_driver = NULL;
575
e7ed70ee
JR
576 pr_info("DMA-API: enable driver filter for driver [%s]\n",
577 current_driver_name);
8a6fc708
JR
578
579out_unlock:
580 write_unlock_irqrestore(&driver_name_lock, flags);
581
582 return count;
583}
584
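/*
 * Example interaction with the filter file (illustrative; assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *	# cat /sys/kernel/debug/dma-api/driver_filter
 *	e1000e
 *	# echo "" > /sys/kernel/debug/dma-api/driver_filter
 *
 * Only the first whitespace-delimited token is used, and a write that
 * does not start with an alphanumeric character switches the filter
 * off again.
 */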
 585static const struct file_operations filter_fops = {
586 .read = filter_read,
587 .write = filter_write,
588};
589
788dcfa6
JR
590static int dma_debug_fs_init(void)
591{
592 dma_debug_dent = debugfs_create_dir("dma-api", NULL);
593 if (!dma_debug_dent) {
e7ed70ee 594 pr_err("DMA-API: can not create debugfs directory\n");
788dcfa6
JR
595 return -ENOMEM;
596 }
597
598 global_disable_dent = debugfs_create_bool("disabled", 0444,
599 dma_debug_dent,
600 (u32 *)&global_disable);
601 if (!global_disable_dent)
602 goto out_err;
603
604 error_count_dent = debugfs_create_u32("error_count", 0444,
605 dma_debug_dent, &error_count);
606 if (!error_count_dent)
607 goto out_err;
608
609 show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
610 dma_debug_dent,
611 &show_all_errors);
612 if (!show_all_errors_dent)
613 goto out_err;
614
615 show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
616 dma_debug_dent,
617 &show_num_errors);
618 if (!show_num_errors_dent)
619 goto out_err;
620
621 num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
622 dma_debug_dent,
623 &num_free_entries);
624 if (!num_free_entries_dent)
625 goto out_err;
626
627 min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
628 dma_debug_dent,
629 &min_free_entries);
630 if (!min_free_entries_dent)
631 goto out_err;
632
8a6fc708
JR
633 filter_dent = debugfs_create_file("driver_filter", 0644,
634 dma_debug_dent, NULL, &filter_fops);
635 if (!filter_dent)
636 goto out_err;
637
788dcfa6
JR
638 return 0;
639
640out_err:
641 debugfs_remove_recursive(dma_debug_dent);
642
643 return -ENOMEM;
644}
645
ed888aef
JR
646static int device_dma_allocations(struct device *dev)
647{
648 struct dma_debug_entry *entry;
649 unsigned long flags;
650 int count = 0, i;
651
652 for (i = 0; i < HASH_SIZE; ++i) {
653 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
654 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
655 if (entry->dev == dev)
656 count += 1;
657 }
658 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
659 }
660
661 return count;
662}
663
664static int dma_debug_device_change(struct notifier_block *nb,
665 unsigned long action, void *data)
666{
667 struct device *dev = data;
668 int count;
669
670
671 switch (action) {
672 case BUS_NOTIFY_UNBOUND_DRIVER:
673 count = device_dma_allocations(dev);
674 if (count == 0)
675 break;
676 err_printk(dev, NULL, "DMA-API: device driver has pending "
 677 "DMA allocations while it is released from the device "
678 "[count=%d]\n", count);
679 break;
680 default:
681 break;
682 }
683
684 return 0;
685}
686
41531c8f
JR
687void dma_debug_add_bus(struct bus_type *bus)
688{
ed888aef
JR
689 struct notifier_block *nb;
690
691 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
692 if (nb == NULL) {
e7ed70ee 693 pr_err("dma_debug_add_bus: out of memory\n");
ed888aef
JR
694 return;
695 }
696
697 nb->notifier_call = dma_debug_device_change;
698
699 bus_register_notifier(bus, nb);
41531c8f 700}
788dcfa6 701
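/*
 * Hedged usage sketch: an architecture opts a bus into the unbind check
 * by calling dma_debug_add_bus() during init, roughly like the x86 PCI
 * wiring (illustrative, simplified):
 *
 *	static int __init pci_iommu_init(void)
 *	{
 *		dma_debug_add_bus(&pci_bus_type);
 *		...
 *		return 0;
 *	}
 *
 * The registered notifier then reports, via err_printk(), any driver
 * that is unbound from a device which still has live DMA mappings.
 */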
6bf07871
JR
702/*
703 * Let the architectures decide how many entries should be preallocated.
704 */
705void dma_debug_init(u32 num_entries)
706{
707 int i;
708
709 if (global_disable)
710 return;
711
712 for (i = 0; i < HASH_SIZE; ++i) {
713 INIT_LIST_HEAD(&dma_entry_hash[i].list);
 714 spin_lock_init(&dma_entry_hash[i].lock);
715 }
716
788dcfa6 717 if (dma_debug_fs_init() != 0) {
e7ed70ee 718 pr_err("DMA-API: error creating debugfs entries - disabling\n");
788dcfa6
JR
719 global_disable = true;
720
721 return;
722 }
723
59d3daaf
JR
724 if (req_entries)
725 num_entries = req_entries;
726
6bf07871 727 if (prealloc_memory(num_entries) != 0) {
e7ed70ee 728 pr_err("DMA-API: debugging out of memory error - disabled\n");
6bf07871
JR
729 global_disable = true;
730
731 return;
732 }
733
e6a1a89d
FT
734 nr_total_entries = num_free_entries;
735
e7ed70ee 736 pr_info("DMA-API: debugging enabled by kernel config\n");
6bf07871
JR
737}
738
59d3daaf
JR
739static __init int dma_debug_cmdline(char *str)
740{
741 if (!str)
742 return -EINVAL;
743
744 if (strncmp(str, "off", 3) == 0) {
e7ed70ee 745 pr_info("DMA-API: debugging disabled on kernel command line\n");
59d3daaf
JR
746 global_disable = true;
747 }
748
749 return 0;
750}
751
752static __init int dma_debug_entries_cmdline(char *str)
753{
754 int res;
755
756 if (!str)
757 return -EINVAL;
758
759 res = get_option(&str, &req_entries);
760
761 if (!res)
762 req_entries = 0;
763
764 return 0;
765}
766
767__setup("dma_debug=", dma_debug_cmdline);
768__setup("dma_debug_entries=", dma_debug_entries_cmdline);
769
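/*
 * Example kernel command line usage for the two __setup() handlers
 * above (illustrative):
 *
 *	dma_debug=off             disable all dma-debug checks
 *	dma_debug_entries=65536   preallocate 65536 debug entries instead
 *				  of the architecture's default
 */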
2d62ece1
JR
770static void check_unmap(struct dma_debug_entry *ref)
771{
772 struct dma_debug_entry *entry;
773 struct hash_bucket *bucket;
774 unsigned long flags;
775
35d40952
FT
776 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
777 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
778 "to free an invalid DMA memory address\n");
2d62ece1 779 return;
35d40952 780 }
2d62ece1
JR
781
782 bucket = get_hash_bucket(ref, &flags);
783 entry = hash_bucket_find(bucket, ref);
784
785 if (!entry) {
6c132d1b 786 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
2d62ece1
JR
787 "to free DMA memory it has not allocated "
788 "[device address=0x%016llx] [size=%llu bytes]\n",
789 ref->dev_addr, ref->size);
790 goto out;
791 }
792
793 if (ref->size != entry->size) {
6c132d1b 794 err_printk(ref->dev, entry, "DMA-API: device driver frees "
2d62ece1
JR
795 "DMA memory with different size "
796 "[device address=0x%016llx] [map size=%llu bytes] "
797 "[unmap size=%llu bytes]\n",
798 ref->dev_addr, entry->size, ref->size);
799 }
800
801 if (ref->type != entry->type) {
6c132d1b 802 err_printk(ref->dev, entry, "DMA-API: device driver frees "
2d62ece1
JR
803 "DMA memory with wrong function "
804 "[device address=0x%016llx] [size=%llu bytes] "
805 "[mapped as %s] [unmapped as %s]\n",
806 ref->dev_addr, ref->size,
807 type2name[entry->type], type2name[ref->type]);
808 } else if ((entry->type == dma_debug_coherent) &&
809 (ref->paddr != entry->paddr)) {
6c132d1b 810 err_printk(ref->dev, entry, "DMA-API: device driver frees "
2d62ece1
JR
811 "DMA memory with different CPU address "
812 "[device address=0x%016llx] [size=%llu bytes] "
813 "[cpu alloc address=%p] [cpu free address=%p]",
814 ref->dev_addr, ref->size,
815 (void *)entry->paddr, (void *)ref->paddr);
816 }
817
818 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
819 ref->sg_call_ents != entry->sg_call_ents) {
6c132d1b 820 err_printk(ref->dev, entry, "DMA-API: device driver frees "
2d62ece1
JR
821 "DMA sg list with different entry count "
822 "[map count=%d] [unmap count=%d]\n",
823 entry->sg_call_ents, ref->sg_call_ents);
824 }
825
826 /*
 827 * This may not be a bug in reality - but most implementations of the
828 * DMA API don't handle this properly, so check for it here
829 */
830 if (ref->direction != entry->direction) {
6c132d1b 831 err_printk(ref->dev, entry, "DMA-API: device driver frees "
2d62ece1
JR
832 "DMA memory with different direction "
833 "[device address=0x%016llx] [size=%llu bytes] "
834 "[mapped with %s] [unmapped with %s]\n",
835 ref->dev_addr, ref->size,
836 dir2name[entry->direction],
837 dir2name[ref->direction]);
838 }
839
840 hash_bucket_del(entry);
841 dma_entry_free(entry);
842
843out:
844 put_hash_bucket(bucket, &flags);
845}
846
847static void check_for_stack(struct device *dev, void *addr)
848{
849 if (object_is_on_stack(addr))
6c132d1b
DW
 850 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
851 "stack [addr=%p]\n", addr);
2d62ece1
JR
852}
853
2e34bde1
JR
854static inline bool overlap(void *addr, u64 size, void *start, void *end)
855{
856 void *addr2 = (char *)addr + size;
857
858 return ((addr >= start && addr < end) ||
859 (addr2 >= start && addr2 < end) ||
860 ((addr < start) && (addr2 >= end)));
861}
862
863static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
864{
865 if (overlap(addr, size, _text, _etext) ||
866 overlap(addr, size, __start_rodata, __end_rodata))
867 err_printk(dev, NULL, "DMA-API: device driver maps "
868 "memory from kernel text or rodata "
869 "[addr=%p] [size=%llu]\n", addr, size);
870}
871
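/*
 * Worked example for overlap() above (illustrative): with
 * start == 0x1000 and end == 0x2000, a buffer at addr == 0x0800 of
 * size 0x1000 ends at addr2 == 0x1800, so the second test fires and
 * the range is flagged; a buffer at addr == 0x0800 of size 0x2000
 * spans the whole region and is caught by the third test
 * (addr < start && addr2 >= end).
 */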
2d62ece1
JR
872static void check_sync(struct device *dev, dma_addr_t addr,
873 u64 size, u64 offset, int direction, bool to_cpu)
874{
875 struct dma_debug_entry ref = {
876 .dev = dev,
877 .dev_addr = addr,
878 .size = size,
879 .direction = direction,
880 };
881 struct dma_debug_entry *entry;
882 struct hash_bucket *bucket;
883 unsigned long flags;
884
885 bucket = get_hash_bucket(&ref, &flags);
886
887 entry = hash_bucket_find(bucket, &ref);
888
889 if (!entry) {
6c132d1b 890 err_printk(dev, NULL, "DMA-API: device driver tries "
2d62ece1
JR
891 "to sync DMA memory it has not allocated "
892 "[device address=0x%016llx] [size=%llu bytes]\n",
93c36ed8 893 (unsigned long long)addr, size);
2d62ece1
JR
894 goto out;
895 }
896
897 if ((offset + size) > entry->size) {
6c132d1b 898 err_printk(dev, entry, "DMA-API: device driver syncs"
2d62ece1
JR
899 " DMA memory outside allocated range "
900 "[device address=0x%016llx] "
901 "[allocation size=%llu bytes] [sync offset=%llu] "
902 "[sync size=%llu]\n", entry->dev_addr, entry->size,
903 offset, size);
904 }
905
906 if (direction != entry->direction) {
6c132d1b 907 err_printk(dev, entry, "DMA-API: device driver syncs "
2d62ece1
JR
908 "DMA memory with different direction "
909 "[device address=0x%016llx] [size=%llu bytes] "
910 "[mapped with %s] [synced with %s]\n",
93c36ed8 911 (unsigned long long)addr, entry->size,
2d62ece1
JR
912 dir2name[entry->direction],
913 dir2name[direction]);
914 }
915
916 if (entry->direction == DMA_BIDIRECTIONAL)
917 goto out;
918
919 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
920 !(direction == DMA_TO_DEVICE))
6c132d1b 921 err_printk(dev, entry, "DMA-API: device driver syncs "
2d62ece1
JR
922 "device read-only DMA memory for cpu "
923 "[device address=0x%016llx] [size=%llu bytes] "
924 "[mapped with %s] [synced with %s]\n",
93c36ed8 925 (unsigned long long)addr, entry->size,
2d62ece1
JR
926 dir2name[entry->direction],
927 dir2name[direction]);
928
929 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
930 !(direction == DMA_FROM_DEVICE))
6c132d1b 931 err_printk(dev, entry, "DMA-API: device driver syncs "
2d62ece1
JR
932 "device write-only DMA memory to device "
933 "[device address=0x%016llx] [size=%llu bytes] "
934 "[mapped with %s] [synced with %s]\n",
93c36ed8 935 (unsigned long long)addr, entry->size,
2d62ece1
JR
936 dir2name[entry->direction],
937 dir2name[direction]);
938
939out:
940 put_hash_bucket(bucket, &flags);
941
942}
943
f62bc980
JR
944void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
945 size_t size, int direction, dma_addr_t dma_addr,
946 bool map_single)
947{
948 struct dma_debug_entry *entry;
949
950 if (unlikely(global_disable))
951 return;
952
953 if (unlikely(dma_mapping_error(dev, dma_addr)))
954 return;
955
956 entry = dma_entry_alloc();
957 if (!entry)
958 return;
959
960 entry->dev = dev;
961 entry->type = dma_debug_page;
962 entry->paddr = page_to_phys(page) + offset;
963 entry->dev_addr = dma_addr;
964 entry->size = size;
965 entry->direction = direction;
966
9537a48e 967 if (map_single)
f62bc980 968 entry->type = dma_debug_single;
9537a48e
JR
969
970 if (!PageHighMem(page)) {
971 void *addr = ((char *)page_address(page)) + offset;
2e34bde1
JR
972 check_for_stack(dev, addr);
973 check_for_illegal_area(dev, addr, size);
f62bc980
JR
974 }
975
976 add_dma_entry(entry);
977}
978EXPORT_SYMBOL(debug_dma_map_page);
979
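/*
 * Hedged sketch of how an architecture's dma_map_page() wrapper is
 * expected to drive this hook (illustrative and simplified; the real
 * inlines live in asm/dma-mapping.h):
 *
 *	static inline dma_addr_t dma_map_page(struct device *dev,
 *					      struct page *page,
 *					      size_t offset, size_t size,
 *					      enum dma_data_direction dir)
 *	{
 *		struct dma_map_ops *ops = get_dma_ops(dev);
 *		dma_addr_t addr;
 *
 *		addr = ops->map_page(dev, page, offset, size, dir, NULL);
 *		debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 *
 *		return addr;
 *	}
 */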
980void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
981 size_t size, int direction, bool map_single)
982{
983 struct dma_debug_entry ref = {
984 .type = dma_debug_page,
985 .dev = dev,
986 .dev_addr = addr,
987 .size = size,
988 .direction = direction,
989 };
990
991 if (unlikely(global_disable))
992 return;
993
994 if (map_single)
995 ref.type = dma_debug_single;
996
997 check_unmap(&ref);
998}
999EXPORT_SYMBOL(debug_dma_unmap_page);
1000
972aa45c
JR
1001void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1002 int nents, int mapped_ents, int direction)
1003{
1004 struct dma_debug_entry *entry;
1005 struct scatterlist *s;
1006 int i;
1007
1008 if (unlikely(global_disable))
1009 return;
1010
1011 for_each_sg(sg, s, mapped_ents, i) {
1012 entry = dma_entry_alloc();
1013 if (!entry)
1014 return;
1015
1016 entry->type = dma_debug_sg;
1017 entry->dev = dev;
1018 entry->paddr = sg_phys(s);
884d0597 1019 entry->size = sg_dma_len(s);
15aedea4 1020 entry->dev_addr = sg_dma_address(s);
972aa45c
JR
1021 entry->direction = direction;
1022 entry->sg_call_ents = nents;
1023 entry->sg_mapped_ents = mapped_ents;
1024
9537a48e
JR
1025 if (!PageHighMem(sg_page(s))) {
1026 check_for_stack(dev, sg_virt(s));
884d0597 1027 check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
9537a48e 1028 }
972aa45c
JR
1029
1030 add_dma_entry(entry);
1031 }
1032}
1033EXPORT_SYMBOL(debug_dma_map_sg);
1034
88f3907f
FT
1035static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
1036{
c17e2cf7 1037 struct dma_debug_entry *entry, ref;
88f3907f
FT
1038 struct hash_bucket *bucket;
1039 unsigned long flags;
c17e2cf7 1040 int mapped_ents;
88f3907f 1041
c17e2cf7 1042 ref.dev = dev;
88f3907f 1043 ref.dev_addr = sg_dma_address(s);
c17e2cf7
JR
 1044 ref.size = sg_dma_len(s);
1045
1046 bucket = get_hash_bucket(&ref, &flags);
1047 entry = hash_bucket_find(bucket, &ref);
1048 mapped_ents = 0;
88f3907f 1049
88f3907f
FT
1050 if (entry)
1051 mapped_ents = entry->sg_mapped_ents;
1052 put_hash_bucket(bucket, &flags);
1053
1054 return mapped_ents;
1055}
1056
972aa45c
JR
1057void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1058 int nelems, int dir)
1059{
972aa45c
JR
1060 struct scatterlist *s;
1061 int mapped_ents = 0, i;
972aa45c
JR
1062
1063 if (unlikely(global_disable))
1064 return;
1065
1066 for_each_sg(sglist, s, nelems, i) {
1067
1068 struct dma_debug_entry ref = {
1069 .type = dma_debug_sg,
1070 .dev = dev,
1071 .paddr = sg_phys(s),
15aedea4 1072 .dev_addr = sg_dma_address(s),
884d0597 1073 .size = sg_dma_len(s),
972aa45c
JR
1074 .direction = dir,
1075 .sg_call_ents = 0,
1076 };
1077
1078 if (mapped_ents && i >= mapped_ents)
1079 break;
1080
88f3907f 1081 if (!i) {
972aa45c 1082 ref.sg_call_ents = nelems;
88f3907f 1083 mapped_ents = get_nr_mapped_entries(dev, s);
972aa45c
JR
1084 }
1085
1086 check_unmap(&ref);
1087 }
1088}
1089EXPORT_SYMBOL(debug_dma_unmap_sg);
1090
6bfd4498
JR
1091void debug_dma_alloc_coherent(struct device *dev, size_t size,
1092 dma_addr_t dma_addr, void *virt)
1093{
1094 struct dma_debug_entry *entry;
1095
1096 if (unlikely(global_disable))
1097 return;
1098
1099 if (unlikely(virt == NULL))
1100 return;
1101
1102 entry = dma_entry_alloc();
1103 if (!entry)
1104 return;
1105
1106 entry->type = dma_debug_coherent;
1107 entry->dev = dev;
1108 entry->paddr = virt_to_phys(virt);
1109 entry->size = size;
1110 entry->dev_addr = dma_addr;
1111 entry->direction = DMA_BIDIRECTIONAL;
1112
1113 add_dma_entry(entry);
1114}
1115EXPORT_SYMBOL(debug_dma_alloc_coherent);
1116
1117void debug_dma_free_coherent(struct device *dev, size_t size,
1118 void *virt, dma_addr_t addr)
1119{
1120 struct dma_debug_entry ref = {
1121 .type = dma_debug_coherent,
1122 .dev = dev,
1123 .paddr = virt_to_phys(virt),
1124 .dev_addr = addr,
1125 .size = size,
1126 .direction = DMA_BIDIRECTIONAL,
1127 };
1128
1129 if (unlikely(global_disable))
1130 return;
1131
1132 check_unmap(&ref);
1133}
1134EXPORT_SYMBOL(debug_dma_free_coherent);
1135
b9d2317e
JR
1136void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1137 size_t size, int direction)
1138{
1139 if (unlikely(global_disable))
1140 return;
1141
1142 check_sync(dev, dma_handle, size, 0, direction, true);
1143}
1144EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1145
1146void debug_dma_sync_single_for_device(struct device *dev,
1147 dma_addr_t dma_handle, size_t size,
1148 int direction)
1149{
1150 if (unlikely(global_disable))
1151 return;
1152
1153 check_sync(dev, dma_handle, size, 0, direction, false);
1154}
1155EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1156
948408ba
JR
1157void debug_dma_sync_single_range_for_cpu(struct device *dev,
1158 dma_addr_t dma_handle,
1159 unsigned long offset, size_t size,
1160 int direction)
1161{
1162 if (unlikely(global_disable))
1163 return;
1164
1165 check_sync(dev, dma_handle, size, offset, direction, true);
1166}
1167EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1168
1169void debug_dma_sync_single_range_for_device(struct device *dev,
1170 dma_addr_t dma_handle,
1171 unsigned long offset,
1172 size_t size, int direction)
1173{
1174 if (unlikely(global_disable))
1175 return;
1176
1177 check_sync(dev, dma_handle, size, offset, direction, false);
1178}
1179EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1180
a31fba5d
JR
1181void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1182 int nelems, int direction)
1183{
1184 struct scatterlist *s;
88f3907f 1185 int mapped_ents = 0, i;
a31fba5d
JR
1186
1187 if (unlikely(global_disable))
1188 return;
1189
1190 for_each_sg(sg, s, nelems, i) {
88f3907f
FT
1191 if (!i)
1192 mapped_ents = get_nr_mapped_entries(dev, s);
1193
1194 if (i >= mapped_ents)
1195 break;
1196
884d0597 1197 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
15aedea4 1198 direction, true);
a31fba5d
JR
1199 }
1200}
1201EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1202
1203void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1204 int nelems, int direction)
1205{
1206 struct scatterlist *s;
88f3907f 1207 int mapped_ents = 0, i;
a31fba5d
JR
1208
1209 if (unlikely(global_disable))
1210 return;
1211
1212 for_each_sg(sg, s, nelems, i) {
88f3907f
FT
1213 if (!i)
1214 mapped_ents = get_nr_mapped_entries(dev, s);
1215
1216 if (i >= mapped_ents)
1217 break;
1218
884d0597 1219 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
15aedea4 1220 direction, false);
a31fba5d
JR
1221 }
1222}
1223EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1224
1745de5e
JR
1225static int __init dma_debug_driver_setup(char *str)
1226{
1227 int i;
1228
1229 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1230 current_driver_name[i] = *str;
1231 if (*str == 0)
1232 break;
1233 }
1234
1235 if (current_driver_name[0])
e7ed70ee
JR
1236 pr_info("DMA-API: enable driver filter for driver [%s]\n",
1237 current_driver_name);
1745de5e
JR
1238
1239
1240 return 1;
1241}
1242__setup("dma_debug_driver=", dma_debug_driver_setup);