// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include "debug.h"

#define HASH_SIZE	16384ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	u64              dev_addr;
	u64              size;
	int              type;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
	unsigned long    pfn;
	size_t           offset;
	enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int	stack_len;
	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

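/*
 * As a concrete illustration (the values are made up, not taken from a real
 * driver): a call such as
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0x100, 0x200, DMA_TO_DEVICE);
 *
 * is recorded by debug_dma_map_page() below as an entry with
 * type == dma_debug_single, pfn == page_to_pfn(page), offset == 0x100,
 * size == 0x200, direction == DMA_TO_DEVICE, dev_addr == dma and
 * map_err_type == MAP_ERR_NOT_CHECKED. The entry stays in dma_entry_hash[]
 * until the matching dma_unmap_page() removes it.
 */
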
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE] = "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
	[DMA_NONE] = "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-26 here as the index into the hash
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
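
/*
 * A short worked example of the bucket selection above, using the
 * HASH_FN_SHIFT and HASH_FN_MASK values defined at the top of this file
 * (the device address is made up):
 *
 *	dev_addr = 0x12345678
 *	idx      = (0x12345678 >> 13) & (16384 - 1) = 0x91a2 & 0x3fff = 0x11a2
 *
 * so the entry is stored in, and later looked up from, dma_entry_hash[0x11a2].
 */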

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size == ref->size ? ++match_lvl : 0;
		entry->type == ref->type ? ++match_lvl : 0;
		entry->direction == ref->direction ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{

	struct dma_debug_entry *entry, index = *ref;
	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);

	for (int i = 0; i < limit; i++) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
		cond_resched();
	}
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

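/*
 * A short worked example of the cacheline numbering above, assuming the
 * common values PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6 (both are
 * architecture dependent, so treat the numbers as an illustration only):
 * CACHELINE_PER_PAGE_SHIFT is then 6, i.e. 64 cachelines per page. A mapping
 * with pfn == 0x1234 and offset == 0x80 yields
 *
 *	cln = (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 2 = 0x48d02
 *
 * i.e. cacheline index 2 within page frame 0x1234, and that value is the key
 * used for the radix tree inserts and deletes below.
 */
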
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data. This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 *
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);
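
/*
 * For reference, a typical interactive session against the files created
 * above (the paths assume debugfs is mounted at /sys/kernel/debug, and the
 * driver name is only an example):
 *
 *	# restrict error reporting to one driver
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *	# report every error instead of only the first ones
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 *	# dump all currently active mappings
 *	cat /sys/kernel/debug/dma-api/dump
 *
 * Any name written to driver_filter is matched against dev->driver->name by
 * driver_filter() above; writing a non-alphanumeric first character switches
 * the filter off again, as implemented in filter_write().
 */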

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
			   "DMA allocations while released from device "
			   "[count=%d]\n"
			   "One of leaked entries details: "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [mapped as %s]\n",
			   count, entry->dev_addr, entry->size,
			   dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

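/*
 * The two parameters parsed above are passed on the kernel command line,
 * for example:
 *
 *	dma_debug=off dma_debug_entries=65536
 *
 * "off" sets global_disable before dma_debug_init() runs, and the entries
 * value replaces the PREALLOC_DMA_DEBUG_ENTRIES default used for the
 * initial pool. The number shown is only an illustration; any value
 * accepted by get_option() works.
 */
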
2d62ece1
JR
943static void check_unmap(struct dma_debug_entry *ref)
944{
945 struct dma_debug_entry *entry;
946 struct hash_bucket *bucket;
947 unsigned long flags;
948
2d62ece1 949 bucket = get_hash_bucket(ref, &flags);
c6a21d0b 950 entry = bucket_find_exact(bucket, ref);
2d62ece1
JR
951
952 if (!entry) {
8d640a51 953 /* must drop lock before calling dma_mapping_error */
50f579a2 954 put_hash_bucket(bucket, flags);
8d640a51 955
bfe0fb0f
SK
956 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
957 err_printk(ref->dev, NULL,
f737b095 958 "device driver tries to free an "
8d640a51
AD
959 "invalid DMA memory address\n");
960 } else {
961 err_printk(ref->dev, NULL,
f737b095 962 "device driver tries to free DMA "
8d640a51
AD
963 "memory it has not allocated [device "
964 "address=0x%016llx] [size=%llu bytes]\n",
965 ref->dev_addr, ref->size);
bfe0fb0f 966 }
8d640a51 967 return;
2d62ece1
JR
968 }
969
970 if (ref->size != entry->size) {
f737b095 971 err_printk(ref->dev, entry, "device driver frees "
2d62ece1
JR
972 "DMA memory with different size "
973 "[device address=0x%016llx] [map size=%llu bytes] "
974 "[unmap size=%llu bytes]\n",
975 ref->dev_addr, entry->size, ref->size);
976 }
977
978 if (ref->type != entry->type) {
f737b095 979 err_printk(ref->dev, entry, "device driver frees "
2d62ece1
JR
980 "DMA memory with wrong function "
981 "[device address=0x%016llx] [size=%llu bytes] "
982 "[mapped as %s] [unmapped as %s]\n",
983 ref->dev_addr, ref->size,
984 type2name[entry->type], type2name[ref->type]);
985 } else if ((entry->type == dma_debug_coherent) &&
0abdd7a8 986 (phys_addr(ref) != phys_addr(entry))) {
f737b095 987 err_printk(ref->dev, entry, "device driver frees "
2d62ece1
JR
988 "DMA memory with different CPU address "
989 "[device address=0x%016llx] [size=%llu bytes] "
59a40e70
JR
990 "[cpu alloc address=0x%016llx] "
991 "[cpu free address=0x%016llx]",
2d62ece1 992 ref->dev_addr, ref->size,
0abdd7a8
DW
993 phys_addr(entry),
994 phys_addr(ref));
2d62ece1
JR
995 }
996
997 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
998 ref->sg_call_ents != entry->sg_call_ents) {
f737b095 999 err_printk(ref->dev, entry, "device driver frees "
2d62ece1
JR
1000 "DMA sg list with different entry count "
1001 "[map count=%d] [unmap count=%d]\n",
1002 entry->sg_call_ents, ref->sg_call_ents);
1003 }
1004
1005 /*
1006 * This may be no bug in reality - but most implementations of the
1007 * DMA API don't handle this properly, so check for it here
1008 */
1009 if (ref->direction != entry->direction) {
f737b095 1010 err_printk(ref->dev, entry, "device driver frees "
2d62ece1
JR
1011 "DMA memory with different direction "
1012 "[device address=0x%016llx] [size=%llu bytes] "
1013 "[mapped with %s] [unmapped with %s]\n",
1014 ref->dev_addr, ref->size,
1015 dir2name[entry->direction],
1016 dir2name[ref->direction]);
1017 }
1018
a5759b2b
MC
1019 /*
1020 * Drivers should use dma_mapping_error() to check the returned
1021 * addresses of dma_map_single() and dma_map_page().
985098a0 1022 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
a5759b2b 1023 */
6c9c6d63
SK
1024 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1025 err_printk(ref->dev, entry,
f737b095 1026 "device driver failed to check map error"
6c9c6d63
SK
1027 "[device address=0x%016llx] [size=%llu bytes] "
1028 "[mapped as %s]",
1029 ref->dev_addr, ref->size,
1030 type2name[entry->type]);
1031 }
1032
2d62ece1
JR
1033 hash_bucket_del(entry);
1034 dma_entry_free(entry);
1035
50f579a2 1036 put_hash_bucket(bucket, flags);
2d62ece1
JR
1037}
1038
b4a0f533
AL
1039static void check_for_stack(struct device *dev,
1040 struct page *page, size_t offset)
2d62ece1 1041{
b4a0f533
AL
1042 void *addr;
1043 struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1044
1045 if (!stack_vm_area) {
1046 /* Stack is direct-mapped. */
1047 if (PageHighMem(page))
1048 return;
1049 addr = page_address(page) + offset;
1050 if (object_is_on_stack(addr))
f737b095 1051 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
b4a0f533
AL
1052 } else {
1053 /* Stack is vmalloced. */
1054 int i;
1055
1056 for (i = 0; i < stack_vm_area->nr_pages; i++) {
1057 if (page != stack_vm_area->pages[i])
1058 continue;
1059
1060 addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
f737b095 1061 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
b4a0f533
AL
1062 break;
1063 }
1064 }
2d62ece1
JR
1065}
1066
f39d1b97 1067static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
2e34bde1 1068{
1d7db834
KW
1069 if (memory_intersects(_stext, _etext, addr, len) ||
1070 memory_intersects(__start_rodata, __end_rodata, addr, len))
f737b095 1071 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
2e34bde1
JR
1072}
1073
aa010efb
JR
1074static void check_sync(struct device *dev,
1075 struct dma_debug_entry *ref,
1076 bool to_cpu)
2d62ece1 1077{
2d62ece1
JR
1078 struct dma_debug_entry *entry;
1079 struct hash_bucket *bucket;
1080 unsigned long flags;
1081
aa010efb 1082 bucket = get_hash_bucket(ref, &flags);
2d62ece1 1083
c6a21d0b 1084 entry = bucket_find_contain(&bucket, ref, &flags);
2d62ece1
JR
1085
1086 if (!entry) {
f737b095 1087 err_printk(dev, NULL, "device driver tries "
2d62ece1
JR
1088 "to sync DMA memory it has not allocated "
1089 "[device address=0x%016llx] [size=%llu bytes]\n",
aa010efb 1090 (unsigned long long)ref->dev_addr, ref->size);
2d62ece1
JR
1091 goto out;
1092 }
1093
aa010efb 1094 if (ref->size > entry->size) {
f737b095 1095 err_printk(dev, entry, "device driver syncs"
2d62ece1
JR
1096 " DMA memory outside allocated range "
1097 "[device address=0x%016llx] "
aa010efb
JR
1098 "[allocation size=%llu bytes] "
1099 "[sync offset+size=%llu]\n",
1100 entry->dev_addr, entry->size,
1101 ref->size);
2d62ece1
JR
1102 }
1103
42d53b4f
KH
1104 if (entry->direction == DMA_BIDIRECTIONAL)
1105 goto out;
1106
aa010efb 1107 if (ref->direction != entry->direction) {
f737b095 1108 err_printk(dev, entry, "device driver syncs "
2d62ece1
JR
1109 "DMA memory with different direction "
1110 "[device address=0x%016llx] [size=%llu bytes] "
1111 "[mapped with %s] [synced with %s]\n",
aa010efb 1112 (unsigned long long)ref->dev_addr, entry->size,
2d62ece1 1113 dir2name[entry->direction],
aa010efb 1114 dir2name[ref->direction]);
2d62ece1
JR
1115 }
1116
2d62ece1 1117 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
aa010efb 1118 !(ref->direction == DMA_TO_DEVICE))
f737b095 1119 err_printk(dev, entry, "device driver syncs "
2d62ece1
JR
1120 "device read-only DMA memory for cpu "
1121 "[device address=0x%016llx] [size=%llu bytes] "
1122 "[mapped with %s] [synced with %s]\n",
aa010efb 1123 (unsigned long long)ref->dev_addr, entry->size,
2d62ece1 1124 dir2name[entry->direction],
aa010efb 1125 dir2name[ref->direction]);
2d62ece1
JR
1126
1127 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
aa010efb 1128 !(ref->direction == DMA_FROM_DEVICE))
f737b095 1129 err_printk(dev, entry, "device driver syncs "
2d62ece1
JR
1130 "device write-only DMA memory to device "
1131 "[device address=0x%016llx] [size=%llu bytes] "
1132 "[mapped with %s] [synced with %s]\n",
aa010efb 1133 (unsigned long long)ref->dev_addr, entry->size,
2d62ece1 1134 dir2name[entry->direction],
aa010efb 1135 dir2name[ref->direction]);
2d62ece1 1136
7f830642
RM
1137 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1138 ref->sg_call_ents != entry->sg_call_ents) {
f737b095 1139 err_printk(ref->dev, entry, "device driver syncs "
7f830642
RM
1140 "DMA sg list with different entry count "
1141 "[map count=%d] [sync count=%d]\n",
1142 entry->sg_call_ents, ref->sg_call_ents);
1143 }
1144
2d62ece1 1145out:
50f579a2 1146 put_hash_bucket(bucket, flags);
2d62ece1
JR
1147}
1148
78c47830
RM
1149static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1150{
1151#ifdef CONFIG_DMA_API_DEBUG_SG
1152 unsigned int max_seg = dma_get_max_seg_size(dev);
1153 u64 start, end, boundary = dma_get_seg_boundary(dev);
1154
1155 /*
1156 * Either the driver forgot to set dma_parms appropriately, or
1157 * whoever generated the list forgot to check them.
1158 */
1159 if (sg->length > max_seg)
f737b095 1160 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
78c47830
RM
1161 sg->length, max_seg);
1162 /*
1163 * In some cases this could potentially be the DMA API
1164 * implementation's fault, but it would usually imply that
1165 * the scatterlist was built inappropriately to begin with.
1166 */
1167 start = sg_dma_address(sg);
1168 end = start + sg_dma_len(sg) - 1;
1169 if ((start ^ end) & ~boundary)
f737b095 1170 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
78c47830
RM
1171 start, end, boundary);
1172#endif
1173}
1174
99c65fa7
SB
1175void debug_dma_map_single(struct device *dev, const void *addr,
1176 unsigned long len)
1177{
1178 if (unlikely(dma_debug_disabled()))
1179 return;
1180
1181 if (!virt_addr_valid(addr))
f737b095 1182 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
99c65fa7
SB
1183 addr, len);
1184
1185 if (is_vmalloc_addr(addr))
f737b095 1186 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
99c65fa7
SB
1187 addr, len);
1188}
1189EXPORT_SYMBOL(debug_dma_map_single);
1190
f62bc980 1191void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
c2bbf9d1
HM
1192 size_t size, int direction, dma_addr_t dma_addr,
1193 unsigned long attrs)
f62bc980
JR
1194{
1195 struct dma_debug_entry *entry;
1196
01ce18b3 1197 if (unlikely(dma_debug_disabled()))
f62bc980
JR
1198 return;
1199
bfe0fb0f 1200 if (dma_mapping_error(dev, dma_addr))
f62bc980
JR
1201 return;
1202
1203 entry = dma_entry_alloc();
1204 if (!entry)
1205 return;
1206
1207 entry->dev = dev;
2e05ea5c 1208 entry->type = dma_debug_single;
0abdd7a8 1209 entry->pfn = page_to_pfn(page);
a97740f8 1210 entry->offset = offset;
f62bc980
JR
1211 entry->dev_addr = dma_addr;
1212 entry->size = size;
1213 entry->direction = direction;
6c9c6d63 1214 entry->map_err_type = MAP_ERR_NOT_CHECKED;
f62bc980 1215
b4a0f533
AL
1216 check_for_stack(dev, page, offset);
1217
9537a48e 1218 if (!PageHighMem(page)) {
f39d1b97
IM
1219 void *addr = page_address(page) + offset;
1220
2e34bde1 1221 check_for_illegal_area(dev, addr, size);
f62bc980
JR
1222 }
1223
c2bbf9d1 1224 add_dma_entry(entry, attrs);
f62bc980 1225}
f62bc980 1226
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

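/*
 * The MAP_ERR_NOT_CHECKED bookkeeping above corresponds to the usual
 * driver-side pattern, sketched here with made-up variable names:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;		// mapping failed, nothing to unmap
 *
 * The dma_mapping_error() call reaches this function via
 * debug_dma_mapping_error() and flips the entry to MAP_ERR_CHECKED; drivers
 * that skip the check are reported from check_unmap() when the buffer is
 * unmapped.
 */
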
f62bc980 1265void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
2e05ea5c 1266 size_t size, int direction)
f62bc980
JR
1267{
1268 struct dma_debug_entry ref = {
2e05ea5c 1269 .type = dma_debug_single,
f62bc980
JR
1270 .dev = dev,
1271 .dev_addr = addr,
1272 .size = size,
1273 .direction = direction,
1274 };
1275
01ce18b3 1276 if (unlikely(dma_debug_disabled()))
f62bc980 1277 return;
f62bc980
JR
1278 check_unmap(&ref);
1279}
f62bc980 1280
972aa45c 1281void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
c2bbf9d1
HM
1282 int nents, int mapped_ents, int direction,
1283 unsigned long attrs)
972aa45c
JR
1284{
1285 struct dma_debug_entry *entry;
1286 struct scatterlist *s;
1287 int i;
1288
01ce18b3 1289 if (unlikely(dma_debug_disabled()))
972aa45c
JR
1290 return;
1291
293d92cb
GS
1292 for_each_sg(sg, s, nents, i) {
1293 check_for_stack(dev, sg_page(s), s->offset);
1294 if (!PageHighMem(sg_page(s)))
1295 check_for_illegal_area(dev, sg_virt(s), s->length);
1296 }
1297
972aa45c
JR
1298 for_each_sg(sg, s, mapped_ents, i) {
1299 entry = dma_entry_alloc();
1300 if (!entry)
1301 return;
1302
1303 entry->type = dma_debug_sg;
1304 entry->dev = dev;
0abdd7a8 1305 entry->pfn = page_to_pfn(sg_page(s));
a97740f8 1306 entry->offset = s->offset;
884d0597 1307 entry->size = sg_dma_len(s);
15aedea4 1308 entry->dev_addr = sg_dma_address(s);
972aa45c
JR
1309 entry->direction = direction;
1310 entry->sg_call_ents = nents;
1311 entry->sg_mapped_ents = mapped_ents;
1312
78c47830
RM
1313 check_sg_segment(dev, s);
1314
c2bbf9d1 1315 add_dma_entry(entry, attrs);
972aa45c
JR
1316 }
1317}
972aa45c 1318
aa010efb
JR
1319static int get_nr_mapped_entries(struct device *dev,
1320 struct dma_debug_entry *ref)
88f3907f 1321{
aa010efb 1322 struct dma_debug_entry *entry;
88f3907f
FT
1323 struct hash_bucket *bucket;
1324 unsigned long flags;
c17e2cf7 1325 int mapped_ents;
88f3907f 1326
aa010efb 1327 bucket = get_hash_bucket(ref, &flags);
c6a21d0b 1328 entry = bucket_find_exact(bucket, ref);
c17e2cf7 1329 mapped_ents = 0;
88f3907f 1330
88f3907f
FT
1331 if (entry)
1332 mapped_ents = entry->sg_mapped_ents;
50f579a2 1333 put_hash_bucket(bucket, flags);
88f3907f
FT
1334
1335 return mapped_ents;
1336}
1337
972aa45c
JR
1338void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1339 int nelems, int dir)
1340{
972aa45c
JR
1341 struct scatterlist *s;
1342 int mapped_ents = 0, i;
972aa45c 1343
01ce18b3 1344 if (unlikely(dma_debug_disabled()))
972aa45c
JR
1345 return;
1346
1347 for_each_sg(sglist, s, nelems, i) {
1348
1349 struct dma_debug_entry ref = {
1350 .type = dma_debug_sg,
1351 .dev = dev,
0abdd7a8
DW
1352 .pfn = page_to_pfn(sg_page(s)),
1353 .offset = s->offset,
15aedea4 1354 .dev_addr = sg_dma_address(s),
884d0597 1355 .size = sg_dma_len(s),
972aa45c 1356 .direction = dir,
e5e8c5b9 1357 .sg_call_ents = nelems,
972aa45c
JR
1358 };
1359
1360 if (mapped_ents && i >= mapped_ents)
1361 break;
1362
e5e8c5b9 1363 if (!i)
aa010efb 1364 mapped_ents = get_nr_mapped_entries(dev, &ref);
972aa45c
JR
1365
1366 check_unmap(&ref);
1367 }
1368}
972aa45c 1369
6bfd4498 1370void debug_dma_alloc_coherent(struct device *dev, size_t size,
c2bbf9d1
HM
1371 dma_addr_t dma_addr, void *virt,
1372 unsigned long attrs)
6bfd4498
JR
1373{
1374 struct dma_debug_entry *entry;
1375
01ce18b3 1376 if (unlikely(dma_debug_disabled()))
6bfd4498
JR
1377 return;
1378
1379 if (unlikely(virt == NULL))
1380 return;
1381
af1da686
MC
1382 /* handle vmalloc and linear addresses */
1383 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
6bfd4498
JR
1384 return;
1385
af1da686
MC
1386 entry = dma_entry_alloc();
1387 if (!entry)
3aaabbf1
MC
1388 return;
1389
6bfd4498
JR
1390 entry->type = dma_debug_coherent;
1391 entry->dev = dev;
e57d0552 1392 entry->offset = offset_in_page(virt);
6bfd4498
JR
1393 entry->size = size;
1394 entry->dev_addr = dma_addr;
1395 entry->direction = DMA_BIDIRECTIONAL;
1396
3aaabbf1
MC
1397 if (is_vmalloc_addr(virt))
1398 entry->pfn = vmalloc_to_pfn(virt);
1399 else
1400 entry->pfn = page_to_pfn(virt_to_page(virt));
1401
c2bbf9d1 1402 add_dma_entry(entry, attrs);
6bfd4498 1403}
6bfd4498
JR
1404
1405void debug_dma_free_coherent(struct device *dev, size_t size,
1406 void *virt, dma_addr_t addr)
1407{
1408 struct dma_debug_entry ref = {
1409 .type = dma_debug_coherent,
1410 .dev = dev,
e57d0552 1411 .offset = offset_in_page(virt),
6bfd4498
JR
1412 .dev_addr = addr,
1413 .size = size,
1414 .direction = DMA_BIDIRECTIONAL,
1415 };
1416
3aaabbf1 1417 /* handle vmalloc and linear addresses */
af1da686 1418 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
3aaabbf1
MC
1419 return;
1420
1421 if (is_vmalloc_addr(virt))
1422 ref.pfn = vmalloc_to_pfn(virt);
1423 else
1424 ref.pfn = page_to_pfn(virt_to_page(virt));
1425
01ce18b3 1426 if (unlikely(dma_debug_disabled()))
6bfd4498
JR
1427 return;
1428
1429 check_unmap(&ref);
1430}
6bfd4498 1431
0e74b34d 1432void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
c2bbf9d1
HM
1433 int direction, dma_addr_t dma_addr,
1434 unsigned long attrs)
0e74b34d
NS
1435{
1436 struct dma_debug_entry *entry;
1437
1438 if (unlikely(dma_debug_disabled()))
1439 return;
1440
1441 entry = dma_entry_alloc();
1442 if (!entry)
1443 return;
1444
1445 entry->type = dma_debug_resource;
1446 entry->dev = dev;
2e0cc304 1447 entry->pfn = PHYS_PFN(addr);
0e74b34d
NS
1448 entry->offset = offset_in_page(addr);
1449 entry->size = size;
1450 entry->dev_addr = dma_addr;
1451 entry->direction = direction;
1452 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1453
c2bbf9d1 1454 add_dma_entry(entry, attrs);
0e74b34d 1455}
0e74b34d
NS
1456
1457void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1458 size_t size, int direction)
1459{
1460 struct dma_debug_entry ref = {
1461 .type = dma_debug_resource,
1462 .dev = dev,
1463 .dev_addr = dma_addr,
1464 .size = size,
1465 .direction = direction,
1466 };
1467
1468 if (unlikely(dma_debug_disabled()))
1469 return;
1470
1471 check_unmap(&ref);
1472}
0e74b34d 1473
b9d2317e
JR
1474void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1475 size_t size, int direction)
1476{
aa010efb
JR
1477 struct dma_debug_entry ref;
1478
01ce18b3 1479 if (unlikely(dma_debug_disabled()))
b9d2317e
JR
1480 return;
1481
aa010efb
JR
1482 ref.type = dma_debug_single;
1483 ref.dev = dev;
1484 ref.dev_addr = dma_handle;
1485 ref.size = size;
1486 ref.direction = direction;
1487 ref.sg_call_ents = 0;
1488
1489 check_sync(dev, &ref, true);
b9d2317e 1490}
b9d2317e
JR
1491
1492void debug_dma_sync_single_for_device(struct device *dev,
1493 dma_addr_t dma_handle, size_t size,
1494 int direction)
1495{
aa010efb
JR
1496 struct dma_debug_entry ref;
1497
01ce18b3 1498 if (unlikely(dma_debug_disabled()))
b9d2317e
JR
1499 return;
1500
aa010efb
JR
1501 ref.type = dma_debug_single;
1502 ref.dev = dev;
1503 ref.dev_addr = dma_handle;
1504 ref.size = size;
1505 ref.direction = direction;
1506 ref.sg_call_ents = 0;
1507
1508 check_sync(dev, &ref, false);
b9d2317e 1509}
b9d2317e 1510
a31fba5d
JR
1511void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1512 int nelems, int direction)
1513{
1514 struct scatterlist *s;
88f3907f 1515 int mapped_ents = 0, i;
a31fba5d 1516
01ce18b3 1517 if (unlikely(dma_debug_disabled()))
a31fba5d
JR
1518 return;
1519
1520 for_each_sg(sg, s, nelems, i) {
aa010efb
JR
1521
1522 struct dma_debug_entry ref = {
1523 .type = dma_debug_sg,
1524 .dev = dev,
0abdd7a8
DW
1525 .pfn = page_to_pfn(sg_page(s)),
1526 .offset = s->offset,
aa010efb
JR
1527 .dev_addr = sg_dma_address(s),
1528 .size = sg_dma_len(s),
1529 .direction = direction,
1530 .sg_call_ents = nelems,
1531 };
1532
88f3907f 1533 if (!i)
aa010efb 1534 mapped_ents = get_nr_mapped_entries(dev, &ref);
88f3907f
FT
1535
1536 if (i >= mapped_ents)
1537 break;
1538
aa010efb 1539 check_sync(dev, &ref, true);
a31fba5d
JR
1540 }
1541}
a31fba5d
JR
1542
1543void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1544 int nelems, int direction)
1545{
1546 struct scatterlist *s;
88f3907f 1547 int mapped_ents = 0, i;
a31fba5d 1548
01ce18b3 1549 if (unlikely(dma_debug_disabled()))
a31fba5d
JR
1550 return;
1551
1552 for_each_sg(sg, s, nelems, i) {
aa010efb
JR
1553
1554 struct dma_debug_entry ref = {
1555 .type = dma_debug_sg,
1556 .dev = dev,
0abdd7a8
DW
1557 .pfn = page_to_pfn(sg_page(s)),
1558 .offset = s->offset,
aa010efb
JR
1559 .dev_addr = sg_dma_address(s),
1560 .size = sg_dma_len(s),
1561 .direction = direction,
1562 .sg_call_ents = nelems,
1563 };
88f3907f 1564 if (!i)
aa010efb 1565 mapped_ents = get_nr_mapped_entries(dev, &ref);
88f3907f
FT
1566
1567 if (i >= mapped_ents)
1568 break;
1569
aa010efb 1570 check_sync(dev, &ref, false);
a31fba5d
JR
1571 }
1572}

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);