// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <trace/events/swiotlb.h>

#include "dma-iommu.h"

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

enum iommu_dma_queue_type {
	IOMMU_DMA_OPTS_PER_CPU_QUEUE,
	IOMMU_DMA_OPTS_SINGLE_QUEUE,
};

struct iommu_dma_options {
	enum iommu_dma_queue_type qt;
	size_t		fq_size;
	unsigned int	fq_timeout;
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain	iovad;
			/* Flush queue */
			union {
				struct iova_fq	*single_fq;
				struct iova_fq	__percpu *percpu_fq;
			};
			/* Number of TLB flushes that have been started */
			atomic64_t		fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t		fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list	fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t		fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
	/* Options for dma-iommu use */
	struct iommu_dma_options	options;
	struct mutex			mutex;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/* Number of entries per flush queue */
#define IOVA_DEFAULT_FQ_SIZE	256
#define IOVA_SINGLE_FQ_SIZE	32768

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_DEFAULT_FQ_TIMEOUT	10
#define IOVA_SINGLE_FQ_TIMEOUT	1000

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	spinlock_t lock;
	unsigned int head, tail;
	unsigned int mod_mask;
	struct iova_fq_entry entries[];
};

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
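
/*
 * The flush-queue ring relies on its size being a power of two (both
 * IOVA_DEFAULT_FQ_SIZE and IOVA_SINGLE_FQ_SIZE are), so that "& mod_mask"
 * with mod_mask == size - 1 is a cheap modulo. One slot is left unused so
 * that head == tail unambiguously means "empty".
 */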

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) & fq->mod_mask) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) & fq->mod_mask;

	return idx;
}

static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) & fq->mod_mask;
	}
}

static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	unsigned long flags;

	spin_lock_irqsave(&fq->lock, flags);
	fq_ring_free_locked(cookie, fq);
	spin_unlock_irqrestore(&fq->lock, flags);
}

static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}
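
/*
 * Queued entries are stamped with fq_flush_start_cnt and may only be
 * reclaimed by fq_ring_free_locked() once fq_flush_finish_cnt has passed
 * their stamp, i.e. once a TLB flush issued after they were queued has
 * completed.
 */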

static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
		fq_ring_free(cookie, cookie->single_fq);
	} else {
		for_each_possible_cpu(cpu)
			fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
	}
}

static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
		fq = cookie->single_fq;
	else
		fq = raw_cpu_ptr(cookie->percpu_fq);

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free_locked(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free_locked(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
}

static void iommu_dma_free_fq_single(struct iova_fq *fq)
{
	int idx;

	fq_ring_for_each(idx, fq)
		put_pages_list(&fq->entries[idx].freelist);
	vfree(fq);
}

static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
{
	int cpu, idx;

	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(percpu_fq);
}

static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	if (!cookie->fq_domain)
		return;

	del_timer_sync(&cookie->fq_timer);
	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
		iommu_dma_free_fq_single(cookie->single_fq);
	else
		iommu_dma_free_fq_percpu(cookie->percpu_fq);
}

static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
{
	int i;

	fq->head = 0;
	fq->tail = 0;
	fq->mod_mask = fq_size - 1;

	spin_lock_init(&fq->lock);

	for (i = 0; i < fq_size; i++)
		INIT_LIST_HEAD(&fq->entries[i].freelist);
}

static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
{
	size_t fq_size = cookie->options.fq_size;
	struct iova_fq *queue;

	queue = vmalloc(struct_size(queue, entries, fq_size));
	if (!queue)
		return -ENOMEM;
	iommu_dma_init_one_fq(queue, fq_size);
	cookie->single_fq = queue;

	return 0;
}

static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
{
	size_t fq_size = cookie->options.fq_size;
	struct iova_fq __percpu *queue;
	int cpu;

	queue = __alloc_percpu(struct_size(queue, entries, fq_size),
			       __alignof__(*queue));
	if (!queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
	cookie->percpu_fq = queue;
	return 0;
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	int rc;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt,  0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
		rc = iommu_dma_init_fq_single(cookie);
	else
		rc = iommu_dma_init_fq_percpu(cookie);

	if (rc) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	mutex_init(&domain->iova_cookie->mutex);
	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_get_resv_regions(dev, list);

	if (dev->of_node)
		of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);

	return res_a->res->start > res_b->res->start;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
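	/*
	 * Reserve the IOVA ranges between (and around) the sorted dma_ranges:
	 * addresses that fall outside every inbound window cannot reach
	 * memory, so the allocator must never hand them out.
	 */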
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* DMA ranges should be non-overlapping */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev, size_t size,
			    enum dma_data_direction dir)
{
	return IS_ENABLED(CONFIG_SWIOTLB) &&
		(dev_is_untrusted(dev) ||
		 dma_kmalloc_needs_bounce(dev, size, dir));
}

static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (!IS_ENABLED(CONFIG_SWIOTLB))
		return false;

	if (dev_is_untrusted(dev))
		return true;

	/*
	 * If kmalloc() buffers are not DMA-safe for this device and
	 * direction, check the individual lengths in the sg list. If any
	 * element is deemed unsafe, use the swiotlb for bouncing.
	 */
	if (!dma_kmalloc_safe(dev, dir)) {
		for_each_sg(sg, s, nents, i)
			if (!dma_kmalloc_size_aligned(s->length))
				return true;
	}

	return false;
}

/**
 * iommu_dma_init_options - Initialize dma-iommu options
 * @options: The options to be initialized
 * @dev: Device the options are set for
 *
 * This allows tuning dma-iommu specific to device properties
 */
static void iommu_dma_init_options(struct iommu_dma_options *options,
				   struct device *dev)
{
	/* Shadowing IOTLB flushes do better with a single large queue */
	if (dev->iommu->shadow_on_flush) {
		options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
		options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
		options->fq_size = IOVA_SINGLE_FQ_SIZE;
	} else {
		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
	}
}
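
/*
 * When the IOMMU shadows mappings on flush, each flush is expensive, so one
 * much larger queue with a longer timeout trades memory and reclaim latency
 * for far fewer flushes; otherwise per-CPU queues minimise lock contention
 * on the unmap fast path.
 */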

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	mutex_lock(&cookie->mutex);
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			ret = -EFAULT;
			goto done_unlock;
		}

		ret = 0;
		goto done_unlock;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		goto done_unlock;

	iommu_dma_init_options(&cookie->options, dev);

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
		domain->type = IOMMU_DOMAIN_DMA;

	ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
	mutex_unlock(&cookie->mutex);
	return ret;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/*
	 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
	 * DAC reasoning loses relevance with PCIe, but enough hardware and
	 * firmware bugs are still lurking out there that it's safest not to
	 * venture into the 64-bit space until necessary.
	 *
	 * If your device goes wrong after seeing the notice then likely either
	 * its driver is not setting DMA masks accurately, the hardware has
	 * some inherent bug in handling >32-bit addresses, or not all the
	 * expected address bits are wired up between the device and the IOMMU.
	 */
	if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);
		if (iova)
			goto done;

		dev->iommu->pci_32bit_workaround = false;
		dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
	}

	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
done:
	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
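
/*
 * Note that @phys need not be granule-aligned: the mapping covers every
 * granule touched by [phys, phys + size), and the sub-granule offset is
 * added back onto the returned IOVA.
 */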

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= GENMASK(__fls(count), 0);
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;
	ssize_t ret;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	/*
	 * Remove the zone/policy flags from the GFP - these are applied to the
	 * __iommu_dma_alloc_pages() but are not used for the supporting
	 * internal allocations that follow.
	 */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
			   gfp);
	if (ret < 0 || ret < size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (sg_dma_is_swiotlb(sgl))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (sg_dma_is_swiotlb(sgl))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
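
/*
 * For untrusted devices, and for kmalloc() buffers that are not DMA-safe,
 * any mapping that does not fill whole IOVA granules is bounced through
 * swiotlb below, so the device can never access unrelated data sharing
 * those granules.
 */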

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev, size, dir) &&
	    iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		if (!is_swiotlb_active(dev)) {
			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
			return DMA_MAPPING_ERROR;
		}

		trace_swiotlb_bounced(dev, phys, size);

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		dma_addr_t s_dma_addr = sg_dma_address(s);
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		if (sg_dma_is_bus_address(s)) {
			if (i > 0)
				cur = sg_next(cur);

			sg_dma_unmark_bus_address(s);
			sg_dma_address(cur) = s_dma_addr;
			sg_dma_len(cur) = s_length;
			sg_dma_mark_bus_address(cur);
			count++;
			cur_len = 0;
			continue;
		}

		s->offset += s_iova_off;
		s->length = s_length;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_is_bus_address(s)) {
			sg_dma_unmark_bus_address(s);
		} else {
			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
				s->offset += sg_dma_address(s);
			if (sg_dma_len(s))
				s->length = sg_dma_len(s);
		}
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	sg_dma_mark_swiotlb(sg);

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		if (is_pci_p2pdma_page(sg_page(s))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				/*
				 * iommu_map_sg() will skip this segment as
				 * it is marked as a bus address,
				 * __finalise_sg() will copy the dma address
				 * into the output segment.
				 */
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Mapping through host bridge should be
				 * mapped with regular IOVAs, thus we
				 * do nothing here and continue below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_restore_sg;
			}
		}

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	if (!iova_len)
		return __finalise_sg(dev, sg, nents, 0);

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM && ret != -EREMOTEIO)
		return -EINVAL;
	return ret;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t end = 0, start;
	struct scatterlist *tmp;
	int i;

	if (sg_dma_is_swiotlb(sg)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, the start and end points
	 * just have to be determined.
	 */
	for_each_sg(sg, tmp, nents, i) {
		if (sg_dma_is_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		start = sg_dma_address(tmp);
		break;
	}

	nents -= i;
	for_each_sg(tmp, tmp, nents, i) {
		if (sg_dma_is_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		end = sg_dma_address(tmp) + sg_dma_len(tmp);
	}

	if (end)
		__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
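
/*
 * Example (sketch): a hypothetical char-device .mmap handler handing a
 * previously allocated coherent buffer to userspace. dma_mmap_coherent()
 * dispatches to iommu_dma_mmap() above, which picks the vmalloc or
 * pfn-remap path as appropriate. struct foo_dev and its fields are
 * hypothetical; assumes <linux/dma-mapping.h> and <linux/fs.h>.
 */
struct foo_dev {
	struct device *dev;
	void *ring;		/* from dma_alloc_coherent() */
	dma_addr_t ring_dma;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *foo = file->private_data;

	return dma_mmap_coherent(foo->dev, vma, foo->ring, foo->ring_dma,
				 SZ_64K);
}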

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
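
/*
 * Example (sketch): building a scatterlist view of the same coherent
 * buffer, as a dma-buf exporter might. dma_get_sgtable() lands in
 * iommu_dma_get_sgtable() above. Reuses the hypothetical struct foo_dev
 * from the mmap sketch; the caller owns sgt and must sg_free_table() it.
 */
static int foo_export_sgt(struct foo_dev *foo, struct sg_table *sgt)
{
	return dma_get_sgtable(foo->dev, sgt, foo->ring, foo->ring_dma,
			       SZ_64K);
}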

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static size_t iommu_dma_opt_mapping_size(void)
{
	return iova_rcache_range();
}
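
/*
 * Example (sketch): how a storage driver might consume the two helpers
 * above via the generic DMA API. dma_opt_mapping_size() keeps per-I/O
 * mappings within the IOVA rcache range so IOVAs stay cheap to recycle,
 * and dma_get_merge_boundary() feeds the queue's virt_boundary mask.
 * Assumes <linux/blkdev.h> and <linux/dma-mapping.h>; foo_set_dma_limits
 * is hypothetical.
 */
static void foo_set_dma_limits(struct device *dev, struct queue_limits *lim)
{
	lim->max_hw_sectors = dma_opt_mapping_size(dev) >> SECTOR_SHIFT;
	lim->virt_boundary_mask = dma_get_merge_boundary(dev);
}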

static const struct dma_map_ops iommu_dma_ops = {
	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
	.opt_mapping_size	= iommu_dma_opt_mapping_size,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
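
/*
 * Example (sketch): the typical caller is the architecture code, once
 * firmware parsing and IOMMU probe have finished. Simplified from
 * arch/arm64/mm/dma-mapping.c; the real function also handles
 * cache-maintenance quirks and Xen, omitted here.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
}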

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}

/**
 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * Return: 0 on success or negative error code if the mapping failed.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

/**
 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
 * @msg: MSI message containing target physical address
 */
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
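
/*
 * Example (sketch): how an MSI irqchip driver pairs the two calls above,
 * modelled loosely on the GICv3 ITS. The doorbell is mapped once at
 * prepare time; every composed message is then rewritten to the IOVA.
 * FOO_DOORBELL_PHYS and the foo_* ops are hypothetical.
 */
#define FOO_DOORBELL_PHYS	0x10000000UL	/* hypothetical doorbell */

static int foo_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	/* Map the doorbell into the device's IOMMU domain. */
	return iommu_dma_prepare_msi(info->desc, FOO_DOORBELL_PHYS);
}

static void foo_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(FOO_DOORBELL_PHYS);
	msg->address_lo = lower_32_bits(FOO_DOORBELL_PHYS);
	msg->data = d->hwirq;

	/* Swap the physical doorbell address for its mapped IOVA. */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}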

static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);