/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

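/*
 * One dma_debug_entry describes one active mapping. For scatter-gather
 * mappings, sg_call_ents holds the nents the driver passed to
 * dma_map_sg() and sg_mapped_ents the number of entries the
 * implementation actually mapped - the two may legitimately differ.
 */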
struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
#define err_printk(dev, format, arg...) do {			\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)
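
/*
 * A report produced by err_printk() looks roughly like this (driver and
 * device names are illustrative):
 *
 *   WARNING: at lib/dma-debug.c:...
 *   e1000e 0000:02:00.0: DMA-API: device driver frees DMA memory it
 *   has not allocated [device address=0x00000000abcd1000]
 *   [size=4096 bytes]
 */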

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address. With a
	 * HASH_FN_SHIFT of 13 and a 10 bit wide HASH_FN_MASK we use
	 * bits 13-22 of the address as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
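
/*
 * Worked example (illustrative address): for dev_addr 0x01234567 the
 * bucket index is (0x01234567 >> 13) & 0x3ff = 0x91a & 0x3ff = 0x11a,
 * i.e. bucket 282 of the 1024 buckets in dma_entry_hash.
 */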

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entry objects.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
			num_entries);

	return 0;

out_err:
	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
						  dma_debug_dent,
						  (u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
					      dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
						  dma_debug_dent,
						  &show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
						  dma_debug_dent,
						  &show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
						   dma_debug_dent,
						   &num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
						   dma_debug_dent,
						   &min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}
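
/*
 * With debugfs mounted at /sys/kernel/debug the files created above can
 * be used like this (paths and values are illustrative):
 *
 *   cat /sys/kernel/debug/dma-api/error_count
 *   echo 1 > /sys/kernel/debug/dma-api/all_errors
 */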

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
				"- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
				"- disabled\n");
		global_disable = true;

		return;
	}

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
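
/*
 * Example usage on the kernel command line (the entry count is
 * illustrative):
 *
 *   dma_debug=off           - disable the checks at boot
 *   dma_debug_entries=4096  - request 4096 preallocated debug entries
 */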

static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr))
		return;

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]\n",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, "DMA-API: device driver maps memory from "
			   "stack [addr=%p]\n", addr);
}

static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, "DMA-API: device driver syncs "
			   "DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(direction == DMA_TO_DEVICE))
		err_printk(dev, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(direction == DMA_FROM_DEVICE))
		err_printk(dev, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}
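
/*
 * Illustration of a bug the read-only check above catches: a buffer
 * mapped with DMA_TO_DEVICE that is later passed to
 * dma_sync_single_for_cpu() with DMA_FROM_DEVICE triggers the
 * "syncs device read-only DMA memory for cpu" warning.
 */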

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single) {
		entry->type = dma_debug_single;
		check_for_stack(dev, page_address(page) + offset);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
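
/*
 * Minimal sketch of a call site (the ops indirection is illustrative -
 * how an architecture's dma_map_page() wrapper might hook this in):
 *
 *	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 *	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 */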

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_page,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = s->length;
		entry->dev_addr       = s->dma_address;
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_virt(s));

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);

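/*
 * Note on partially mapped scatterlists: an IOMMU may map fewer entries
 * than the driver passed in, so nelems here can exceed the
 * sg_mapped_ents recorded at map time. The first hash lookup below
 * recovers sg_mapped_ents so the loop stops after the entries that
 * were actually mapped.
 */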
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.paddr        = sg_phys(s),
			.dev_addr     = s->dma_address,
			.size         = s->length,
			.direction    = dir,
			.sg_call_ents = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (mapped_ents == 0) {
			struct hash_bucket *bucket;
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.paddr     = virt_to_phys(virt),
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);