// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include "debug.h"

#define HASH_SIZE       16384ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: dma address
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
	[DMA_NONE]		= "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);

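/*
 * A minimal sketch of what err_printk() produces, assuming the pr_fmt()
 * prefix above and a hypothetical e1000e NIC (driver name and addresses
 * are illustrative only):
 *
 *	DMA-API: e1000e 0000:00:19.0: device driver frees DMA memory with
 *	different size [device address=0x00000000ffffe000] ...
 *	Mapped at:
 *	 <mapping-time backtrace from dump_entry_trace(), CONFIG_STACKTRACE>
 *
 * The WARN() itself additionally prints a backtrace of the call site that
 * triggered the report, i.e. the unmap/sync/free side of the bug.
 */
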
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address. With HASH_FN_SHIFT
	 * of 13 and the 14-bit HASH_FN_MASK, bits 13-26 of the address form
	 * the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

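/*
 * A worked example of the index computation, using the HASH_FN_SHIFT (13)
 * and HASH_FN_MASK (0x3fff) values defined above and an illustrative
 * address:
 *
 *	dev_addr          = 0x00000000ffffe000
 *	dev_addr >> 13    = 0x7ffff
 *	0x7ffff & 0x3fff  = 0x3fff	-> bucket 16383
 *
 * Addresses that differ only in the low 13 bits (8 KiB) share a bucket,
 * which is what lets bucket_find_contain() below step bucket by bucket
 * through neighbouring address ranges.
 */
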
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

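/*
 * containing_match() accepts a reference that lies fully inside a recorded
 * mapping, which is what partial syncs look like. Illustrative numbers: a
 * mapping with dev_addr=0x1000 and size=0x2000 contains a sync reference
 * with dev_addr=0x1800 and size=0x100, since 0x1000 <= 0x1800 and
 * 0x1000 + 0x2000 >= 0x1800 + 0x100.
 */
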
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	struct dma_debug_entry *entry, index = *ref;
	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);

	for (int i = 0; i < limit; i++) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

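/*
 * Why the backwards walk above: a mapping is hashed by its start address,
 * but a partial sync inside it may hash to a later bucket. Illustrative
 * numbers with HASH_FN_SHIFT == 13 (8 KiB per bucket step): a mapping at
 * dev_addr 0x3000 with size 0x4000 lives in bucket 1, while a sync at
 * 0x5000 within it hashes to bucket 2. Stepping index.dev_addr down by
 * 8 KiB per iteration revisits bucket 1 and finds the containing entry;
 * the limit bounds the walk at bucket 0.
 */
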
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

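/*
 * A worked example of the cacheline numbering, assuming a common x86-64
 * configuration (PAGE_SHIFT 12, L1_CACHE_SHIFT 6, hence
 * CACHELINE_PER_PAGE_SHIFT == 6, i.e. 64 cachelines per page):
 *
 *	pfn = 0x1234, offset = 0x80
 *	cln = (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 2 = 0x48d02
 *
 * i.e. the third 64-byte cacheline of page 0x1234. This global cacheline
 * number is the key into the dma_active_cacheline radix tree.
 */
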
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

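/*
 * Note the overlap count is not stored in the radix tree slot (that holds
 * the dma_debug_entry pointer); it is encoded bitwise across the tree's
 * per-entry tags. With the usual RADIX_TREE_MAX_TAGS of 3 this forms a
 * 3-bit counter, so at most ACTIVE_CACHELINE_MAX_OVERLAP == 7 additional
 * mappings of one cacheline can be balanced before the WARN_ONCE() below
 * fires.
 */
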
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/*
 * Dump mapping entries to the kernel log for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				cln = to_cacheline_number(entry);
				dev_info(entry->dev,
					 "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 &cln, dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}
		spin_unlock_irqrestore(&bucket->lock, flags);

		cond_resched();
	}
}

/*
 * Dump mapping entries to user space via debugfs
 */
static int dump_show(struct seq_file *seq, void *v)
{
	int idx;
	phys_addr_t cln;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			cln = to_cacheline_number(entry);
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n",
				   dev_driver_string(entry->dev),
				   dev_name(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   &cln, dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

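/*
 * The pool grows one zeroed page at a time and pages are never returned;
 * entries stay on free_entries for the lifetime of the system. As a rough
 * illustration (sizes are arch- and config-dependent): with 4 KiB pages,
 * if ____cacheline_aligned_in_smp pads struct dma_debug_entry to 128
 * bytes, DMA_DEBUG_DYNAMIC_ENTRIES works out to 32 entries per call.
 */
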
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/*
 * This should be called outside of free_entries_lock scope to avoid potential
 * deadlocks with serial consoles that use DMA.
 */
static void __dma_entry_alloc_check_leak(u32 nr_entries)
{
	u32 tmp = nr_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_entries,
			(nr_entries / nr_prealloc_entries));
	}
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	bool alloc_check_leak = false;
	struct dma_debug_entry *entry;
	unsigned long flags;
	u32 nr_entries;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		alloc_check_leak = true;
		nr_entries = nr_total_entries;
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

	if (alloc_check_leak)
		__dma_entry_alloc_check_leak(nr_entries);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 * - only use the first token we got
	 * - token delimiter is everything looking like a space
	 *   character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);

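/*
 * A typical session against the debugfs files created above (paths assume
 * debugfs is mounted at /sys/kernel/debug; the driver name is
 * illustrative):
 *
 *	# report every error instead of only the first show_num_errors
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 *	# how many errors have been detected so far?
 *	cat /sys/kernel/debug/dma-api/error_count
 *	# restrict reports to a single driver
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *	# dump all currently active mappings
 *	cat /sys/kernel/debug/dma-api/dump
 */
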
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
			   "DMA allocations while released from device "
			   "[count=%d]\n"
			   "One of leaked entries details: "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [mapped as %s]\n",
			   count, entry->dev_addr, entry->size,
			   dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(const struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

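/*
 * The two handlers above correspond to kernel command line usage such as
 * (the entry count is illustrative):
 *
 *	dma_debug=off            - disable dma-debug at boot
 *	dma_debug_entries=65536  - preallocate 65536 entries instead of
 *	                           the PREALLOC_DMA_DEBUG_ENTRIES default
 */
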
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, flags);
}

static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (memory_intersects(_stext, _etext, addr, len) ||
	    memory_intersects(__start_rodata, __end_rodata, addr, len))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, flags);
}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}

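/*
 * The (start ^ end) & ~boundary test above flags any segment whose first
 * and last byte differ in an address bit above the boundary mask.
 * Illustrative numbers: with boundary = 0xffff (64 KiB - 1), a segment
 * from 0xff00 to 0x10080 gives 0xff00 ^ 0x10080 = 0x1ff80, and
 * 0x1ff80 & ~0xffff = 0x10000 != 0, so the segment crosses a 64 KiB
 * boundary and is reported.
 */
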
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

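/*
 * A minimal sketch of the driver bug this catches (hypothetical driver
 * code, not part of this file): vmalloc()ed memory is only virtually
 * contiguous and must not be passed to dma_map_single():
 *
 *	buf = vmalloc(8192);
 *	dma_map_single(dev, buf, 8192, DMA_TO_DEVICE);	// reported here
 *
 * Such buffers need dma_alloc_coherent(), or page-by-page mapping of the
 * vmalloc_to_page() results, instead.
 */
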
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_single;
	entry->pfn	 = page_to_pfn(page);
	entry->offset	 = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry, attrs);
}

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

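/*
 * The driver-side pattern this pairs with (a sketch; see
 * Documentation/core-api/dma-api.rst): every dma_map_single() or
 * dma_map_page() result must be fed to dma_mapping_error(), which lands
 * here and flips the entry to MAP_ERR_CHECKED:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *
 * Unmapping an entry still marked MAP_ERR_NOT_CHECKED triggers the
 * "failed to check map error" report in check_unmap().
 */
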
void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_single,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction,
		      unsigned long attrs)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nents, i) {
		check_for_stack(dev, sg_page(s), s->offset);
		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), s->length);
	}

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn	      = page_to_pfn(sg_page(s));
		entry->offset	      = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_sg_segment(dev, s);

		add_dma_entry(entry, attrs);
	}
}

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket       = get_hash_bucket(ref, &flags);
	entry        = bucket_find_exact(bucket, ref);
	mapped_ents  = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt,
			      unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset	 = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry, attrs);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.offset		= offset_in_page(virt),
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr,
			    unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type		= dma_debug_resource;
	entry->dev		= dev;
	entry->pfn		= PHYS_PFN(addr);
	entry->offset		= offset_in_page(addr);
	entry->size		= size;
	entry->dev_addr		= dma_addr;
	entry->direction	= direction;
	entry->map_err_type	= MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry, attrs);
}

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);