/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
        struct list_head list;
        spinlock_t       lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in the
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */

static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                printk(KERN_WARNING "Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

#define err_printk(dev, entry, format, arg...) do {             \
                error_count += 1;                                \
                if (show_all_errors || show_num_errors > 0) {    \
                        WARN(1, "%s %s: " format,                \
                             dev_driver_string(dev),             \
                             dev_name(dev), ## arg);             \
                        dump_entry_trace(entry);                 \
                }                                                \
                if (!show_all_errors && show_num_errors > 0)     \
                        show_num_errors -= 1;                    \
        } while (0);
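
/*
 * Note (editorial): show_all_errors and show_num_errors are exported below by
 * dma_debug_fs_init() as the writeable debugfs files "all_errors" and
 * "num_errors" in the "dma-api" directory, so the amount of reporting done by
 * err_printk() can be tuned at runtime without rebuilding the kernel.
 */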

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address. With HASH_FN_SHIFT == 13
         * and the 10-bit HASH_FN_MASK this uses bits 13-22 of the dma address
         * as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
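
/*
 * Worked example (editorial, illustrative address): with HASH_FN_SHIFT == 13
 * and HASH_FN_MASK == 0x3ff, a mapping at dev_addr 0x12345678 hashes to
 * (0x12345678 >> 13) & 0x3ff == 0x91a2 & 0x3ff == 0x1a2, so all lookups for
 * that address only have to search the list in dma_entry_hash[0x1a2].
 */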

/* Request exclusive access to a hash bucket for a given dma_debug_entry. */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/* Give up exclusive access to the hash bucket */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/* Search a given entry in the hash bucket list */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/* Add an entry to a hash bucket */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/* Remove entry from a hash bucket list */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/* Dump mapping entries for debugging purposes */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev)
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction]);
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
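
/*
 * Example output (editorial; device name and addresses are made up, the
 * exact dev_info() prefix depends on the driver and device):
 *
 *      e1000e 0000:00:19.0: single idx 418 P=1f94000 D=1f94000 L=1000 DMA_TO_DEVICE
 *
 * i.e. mapping type, hash bucket index, physical address, device address,
 * length and direction of every entry belonging to the given device, or to
 * all devices when dev is NULL.
 */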

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/*
 * struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entry objects.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;
                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
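
/*
 * Usage sketch (editorial): a driver or IOMMU implementation that knows how
 * many mappings it can reasonably support may grow or shrink the preallocated
 * entry pool at runtime; "required_entries" below is a made-up variable:
 *
 *      if (dma_debug_resize_entries(required_entries) != 0)
 *              printk(KERN_WARNING "could not resize DMA debug entry pool\n");
 *
 * The function returns 0 when the pool now holds exactly the requested number
 * of entries and a non-zero value otherwise.
 */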

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;
                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
               num_entries);

        return 0;

out_err:
        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
                        (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                        dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                        dma_debug_dent, &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                        dma_debug_dent, &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                        dma_debug_dent, &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                        dma_debug_dent, &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        /* FIXME: register notifier */
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;
                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;
                return;
        }

        nr_total_entries = num_free_entries;

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
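
/*
 * Usage sketch (editorial): an architecture enables the checks by calling
 * dma_debug_init() from its DMA setup code with the number of entries it
 * wants preallocated; the constant name and value below are only
 * illustrative:
 *
 *      #define PREALLOC_DMA_DEBUG_ENTRIES      (1 << 16)
 *
 *      dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *
 * The dma_debug_entries= parameter handled below can still override the
 * number at boot time.
 */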

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);
        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
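
/*
 * Example (editorial): booting with
 *
 *      dma_debug=off
 *
 * disables the checks entirely, while
 *
 *      dma_debug_entries=65536
 *
 * requests 65536 preallocated entries instead of the architecture default.
 */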

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free an invalid DMA memory address\n");
                return;
        }

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}
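
/*
 * Example of a bug this catches (editorial, hypothetical driver code): a
 * buffer mapped and unmapped with different sizes is reported by the
 * ref->size != entry->size check above:
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
 *      ...
 *      dma_unmap_single(dev, handle, 2048, DMA_TO_DEVICE);
 *
 * The unmap is looked up by (device, device address), so the size mismatch is
 * detected even though the address itself is valid.
 */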

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory "
                                "from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
        void *addr2 = (char *)addr + size;

        return ((addr >= start && addr < end) ||
                (addr2 >= start && addr2 < end) ||
                ((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
        if (overlap(addr, size, _text, _etext) ||
            overlap(addr, size, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps "
                                "memory from kernel text or rodata "
                                "[addr=%p] [size=%llu]\n", addr, size);
}
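
/*
 * Example of a bug this catches (editorial, hypothetical driver code):
 * mapping an on-stack buffer, which may be reused or unwound while the
 * device is still accessing it:
 *
 *      char buf[64];
 *      dma_addr_t handle = dma_map_single(dev, buf, sizeof(buf),
 *                                         DMA_FROM_DEVICE);
 *
 * check_for_stack() flags the mapping because buf lives on the kernel stack.
 */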

static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
                                "[allocation size=%llu bytes] [sync offset=%llu] "
                                "[sync size=%llu]\n", entry->dev_addr, entry->size,
                                offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);
}
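
/*
 * Example of a bug this catches (editorial, hypothetical driver code): a
 * buffer mapped for DMA_TO_DEVICE being synced for the CPU as if the device
 * had written to it:
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ...
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *
 * This triggers both the "different direction" and the "device read-only
 * DMA memory" warnings above.
 */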

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = ((char *)page_address(page)) + offset;
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
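
/*
 * Wiring sketch (editorial): drivers do not call these hooks directly; the
 * dma_map_single()/dma_map_page() wrappers are expected to invoke them after
 * the real mapping has been set up, roughly like
 *
 *      addr = ops->map_page(dev, page, offset, size, dir, ...);
 *      debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 *
 * with map_single set to true when the mapping was requested through
 * dma_map_single(), so that map/unmap type mismatches can be reported.
 */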

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), s->length);
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .paddr          = sg_phys(s),
                        .dev_addr       = s->dma_address,
                        .size           = s->length,
                        .direction      = dir,
                        .sg_call_ents   = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
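
/*
 * Note (editorial): dma_map_sg() may legitimately return fewer mapped
 * segments than it was given (an IOMMU can merge entries), while
 * dma_unmap_sg() must still be called with the original nents. That is why
 * every entry records both sg_call_ents and sg_mapped_ents, and why the loop
 * above stops after mapped_ents segments instead of nelems.
 */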

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .paddr          = virt_to_phys(virt),
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
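
/*
 * Example of a bug this catches (editorial, hypothetical driver code):
 * freeing a coherent buffer with a CPU address other than the one returned
 * by the allocation:
 *
 *      void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *      ...
 *      dma_free_coherent(dev, size, (char *)cpu + 0x10, handle);
 *
 * check_unmap() compares the physical address of the CPU pointer against the
 * recorded one and reports the "different CPU address" error.
 */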

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);