// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

#include "dma-iommu.h"

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain	iovad;

			struct iova_fq __percpu *fq;	/* Flush queue */
			/* Number of TLB flushes that have been started */
			atomic64_t		fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t		fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list	fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t		fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
	struct mutex			mutex;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/* Number of entries per flush queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned int head, tail;
	spinlock_t lock;
};

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

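/*
 * Reclaim queue entries whose deferred TLB flush has already completed,
 * i.e. whose flush-counter snapshot is behind fq_flush_finish_cnt,
 * returning their IOVAs and freed pagetable pages.
 */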
static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

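/*
 * Flush the entire IOTLB for the flush-queue domain, bracketed by the
 * start/finish counters so that fq_ring_free() can tell which queued
 * entries the flush covers.
 */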
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(cookie->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(cookie, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

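/*
 * Defer freeing an IOVA range: park it in this CPU's flush queue together
 * with the pagetable pages freed by the unmap, to be released once a later
 * IOTLB flush makes reuse of the range safe.
 */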
static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	fq = raw_cpu_ptr(cookie->fq);
	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}

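/* Tear down the per-CPU flush queues when the DMA cookie is released */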
static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	int cpu, idx;

	if (!cookie->fq)
		return;

	del_timer_sync(&cookie->fq_timer);
	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(cookie->fq);
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_fq __percpu *queue;
	int i, cpu;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt, 0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(queue, cpu);

		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);

		for (i = 0; i < IOVA_FQ_SIZE; i++)
			INIT_LIST_HEAD(&fq->entries[i].freelist);
	}

	cookie->fq = queue;

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	mutex_init(&domain->iova_cookie->mutex);
	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_get_resv_regions(dev, list);

	if (dev->of_node)
		of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);

	return res_a->res->start > res_b->res->start;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* DMA ranges should be non-overlapping */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev)
{
	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	mutex_lock(&cookie->mutex);
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			ret = -EFAULT;
			goto done_unlock;
		}

		ret = 0;
		goto done_unlock;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		goto done_unlock;

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
		domain->type = IOMMU_DOMAIN_DMA;

	ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
	mutex_unlock(&cookie->mutex);
	return ret;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

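/*
 * Allocate an IOVA below @dma_limit. PCI devices are first tried below 4GB
 * to favour SAC addressing, unless that is suppressed by iommu.forcedac.
 */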
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

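/*
 * Release an IOVA range by one of three paths: rewind the trivial MSI
 * allocator, defer to the flush queue if one is in use, or return the
 * range to the IOVA caches immediately.
 */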
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

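/*
 * Unmap an aligned range and release its IOVA, syncing the IOTLB right away
 * in strict mode or leaving the invalidation to the flush queue otherwise.
 */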
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

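/*
 * Core mapping helper: allocate an IOVA fitting below @dma_mask and map it
 * to @phys, returning the DMA address corresponding to the (possibly
 * unaligned) start of the buffer.
 */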
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

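/*
 * Gather an array of pages totalling @count, preferring the larger orders
 * permitted by @order_mask but degrading gracefully to smaller ones.
 */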
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= GENMASK(MAX_ORDER, 0);
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= GENMASK(__fls(count), 0);
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;
	ssize_t ret;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	/*
	 * Remove the zone/policy flags from the GFP - these are applied to the
	 * __iommu_dma_alloc_pages() but are not used for the supporting
	 * internal allocations that follow.
	 */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
			   gfp);
	if (ret < 0 || ret < size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

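/*
 * Map a single page for DMA. For untrusted devices, a buffer that is not
 * aligned to the IOVA granule is first bounced through swiotlb so that no
 * unrelated data shares its IOMMU mapping.
 */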
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		if (!is_swiotlb_active(dev)) {
			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
			return DMA_MAPPING_ERROR;
		}

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

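/*
 * Tear down a single-page mapping: CPU cache maintenance first, then the
 * unmap, then returning any bounce buffer to swiotlb.
 */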
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		dma_addr_t s_dma_addr = sg_dma_address(s);
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		if (sg_is_dma_bus_address(s)) {
			if (i > 0)
				cur = sg_next(cur);

			sg_dma_unmark_bus_address(s);
			sg_dma_address(cur) = s_dma_addr;
			sg_dma_len(cur) = s_length;
			sg_dma_mark_bus_address(cur);
			count++;
			cur_len = 0;
			continue;
		}

		s->offset += s_iova_off;
		s->length = s_length;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_is_dma_bus_address(s)) {
			sg_dma_unmark_bus_address(s);
		} else {
			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
				s->offset += sg_dma_address(s);
			if (sg_dma_len(s))
				s->length = sg_dma_len(s);
		}
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

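/*
 * For untrusted devices the scatterlist is not mapped as one contiguous
 * IOVA allocation; each segment is bounced and mapped individually instead.
 */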
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_swiotlb(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		if (is_pci_p2pdma_page(sg_page(s))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				/*
				 * iommu_map_sg() will skip this segment as
				 * it is marked as a bus address,
				 * __finalise_sg() will copy the dma address
				 * into the output segment.
				 */
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Mapping through host bridge should be
				 * mapped with regular IOVAs, thus we
				 * do nothing here and continue below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_restore_sg;
			}
		}

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	if (!iova_len)
		return __finalise_sg(dev, sg, nents, 0);

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM && ret != -EREMOTEIO)
		return -EINVAL;
	return ret;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t end = 0, start;
	struct scatterlist *tmp;
	int i;

	if (dev_use_swiotlb(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, the start and end points
	 * just have to be determined.
	 */
	for_each_sg(sg, tmp, nents, i) {
		if (sg_is_dma_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		start = sg_dma_address(tmp);
		break;
	}

	nents -= i;
	for_each_sg(tmp, tmp, nents, i) {
		if (sg_is_dma_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		end = sg_dma_address(tmp) + sg_dma_len(tmp);
	}

	if (end)
		__iommu_dma_unmap(dev, start, end - start);
}

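/*
 * Map a physical MMIO resource (e.g. another device's registers) with
 * IOMMU_MMIO set so the IOMMU driver can apply suitable memory attributes.
 */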
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

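/*
 * Free a buffer by whichever method matches its allocation: atomic pool,
 * remapped page array or contiguous region, or plain lowmem/CMA.
 */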
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

ee1ef05d CH |
1413 | static void *iommu_dma_alloc_pages(struct device *dev, size_t size, |
1414 | struct page **pagep, gfp_t gfp, unsigned long attrs) | |
06d60728 CH |
1415 | { |
1416 | bool coherent = dev_is_dma_coherent(dev); | |
9ad5d6ed | 1417 | size_t alloc_size = PAGE_ALIGN(size); |
90ae409f | 1418 | int node = dev_to_node(dev); |
9a4ab94a | 1419 | struct page *page = NULL; |
9ad5d6ed | 1420 | void *cpu_addr; |
06d60728 | 1421 | |
591fcf3b | 1422 | page = dma_alloc_contiguous(dev, alloc_size, gfp); |
90ae409f CH |
1423 | if (!page) |
1424 | page = alloc_pages_node(node, gfp, get_order(alloc_size)); | |
072bebc0 RM |
1425 | if (!page) |
1426 | return NULL; | |
1427 | ||
f5ff79fd | 1428 | if (!coherent || PageHighMem(page)) { |
33dcb37c | 1429 | pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); |
072bebc0 | 1430 | |
9ad5d6ed | 1431 | cpu_addr = dma_common_contiguous_remap(page, alloc_size, |
51231740 | 1432 | prot, __builtin_return_address(0)); |
9ad5d6ed | 1433 | if (!cpu_addr) |
ee1ef05d | 1434 | goto out_free_pages; |
8680aa5a RM |
1435 | |
1436 | if (!coherent) | |
9ad5d6ed | 1437 | arch_dma_prep_coherent(page, size); |
8680aa5a | 1438 | } else { |
9ad5d6ed | 1439 | cpu_addr = page_address(page); |
8680aa5a | 1440 | } |
ee1ef05d CH |
1441 | |
1442 | *pagep = page; | |
9ad5d6ed RM |
1443 | memset(cpu_addr, 0, alloc_size); |
1444 | return cpu_addr; | |
072bebc0 | 1445 | out_free_pages: |
591fcf3b | 1446 | dma_free_contiguous(dev, page, alloc_size); |
072bebc0 | 1447 | return NULL; |
06d60728 CH |
1448 | } |
1449 | ||
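/*
 * Note on the helper above: the backing memory is always physically
 * contiguous (CMA first, plain page allocation as fallback); only the CPU
 * view varies. Non-coherent devices get a non-cacheable remap, highmem
 * pages get a kernel mapping in vmalloc space, and coherent lowmem
 * allocations are used straight through the linear map.
 */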
ee1ef05d CH |
1450 | static void *iommu_dma_alloc(struct device *dev, size_t size, |
1451 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) | |
1452 | { | |
1453 | bool coherent = dev_is_dma_coherent(dev); | |
1454 | int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); | |
1455 | struct page *page = NULL; | |
1456 | void *cpu_addr; | |
1457 | ||
1458 | gfp |= __GFP_ZERO; | |
1459 | ||
f5ff79fd | 1460 | if (gfpflags_allow_blocking(gfp) && |
e8d39a90 CH |
1461 | !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { |
1462 | return iommu_dma_alloc_remap(dev, size, handle, gfp, | |
1463 | dma_pgprot(dev, PAGE_KERNEL, attrs), attrs); | |
1464 | } | |
ee1ef05d | 1465 | |
e6475eb0 CH |
1466 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
1467 | !gfpflags_allow_blocking(gfp) && !coherent) | |
9420139f CH |
1468 | page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, |
1469 | gfp, NULL); | |
ee1ef05d CH |
1470 | else |
1471 | cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); | |
1472 | if (!cpu_addr) | |
1473 | return NULL; | |
1474 | ||
6e235020 TM |
1475 | *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, |
1476 | dev->coherent_dma_mask); | |
ee1ef05d CH |
1477 | if (*handle == DMA_MAPPING_ERROR) { |
1478 | __iommu_dma_free(dev, size, cpu_addr); | |
1479 | return NULL; | |
1480 | } | |
1481 | ||
1482 | return cpu_addr; | |
1483 | } | |
1484 | ||
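/*
 * Usage sketch (hypothetical caller): all three paths above sit behind
 * the usual coherent allocation API, e.g.
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *
 * A blocking GFP_KERNEL call like this normally takes the
 * iommu_dma_alloc_remap() path; atomic callers on non-coherent devices
 * are served from the pre-mapped pool instead, and
 * DMA_ATTR_FORCE_CONTIGUOUS (via dma_alloc_attrs()) demands one
 * physically contiguous block.
 */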
06d60728 CH |
1485 | static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
1486 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | |
1487 | unsigned long attrs) | |
1488 | { | |
1489 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | |
efd9f10b | 1490 | unsigned long pfn, off = vma->vm_pgoff; |
06d60728 CH |
1491 | int ret; |
1492 | ||
33dcb37c | 1493 | vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); |
06d60728 CH |
1494 | |
1495 | if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) | |
1496 | return ret; | |
1497 | ||
1498 | if (off >= nr_pages || vma_pages(vma) > nr_pages - off) | |
1499 | return -ENXIO; | |
1500 | ||
f5ff79fd | 1501 | if (is_vmalloc_addr(cpu_addr)) { |
5cf45379 | 1502 | struct page **pages = dma_common_find_pages(cpu_addr); |
06d60728 | 1503 | |
efd9f10b | 1504 | if (pages) |
71fe89ce | 1505 | return vm_map_pages(vma, pages, nr_pages); |
efd9f10b CH |
1506 | pfn = vmalloc_to_pfn(cpu_addr); |
1507 | } else { | |
1508 | pfn = page_to_pfn(virt_to_page(cpu_addr)); | |
06d60728 CH |
1509 | } |
1510 | ||
efd9f10b CH |
1511 | return remap_pfn_range(vma, vma->vm_start, pfn + off, |
1512 | vma->vm_end - vma->vm_start, | |
1513 | vma->vm_page_prot); | |
06d60728 CH |
1514 | } |
1515 | ||
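/*
 * Usage sketch (hypothetical): a driver exporting such a buffer to
 * userspace from its ->mmap() file operation would do
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_handle, fd->size);
 *	}
 *
 * which lands in iommu_dma_mmap() above when fd->dev uses these ops.
 */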
06d60728 CH |
1516 | static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
1517 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | |
1518 | unsigned long attrs) | |
1519 | { | |
3fb3378b CH |
1520 | struct page *page; |
1521 | int ret; | |
06d60728 | 1522 | |
f5ff79fd | 1523 | if (is_vmalloc_addr(cpu_addr)) { |
5cf45379 | 1524 | struct page **pages = dma_common_find_pages(cpu_addr); |
06d60728 | 1525 | |
3fb3378b CH |
1526 | if (pages) { |
1527 | return sg_alloc_table_from_pages(sgt, pages, | |
1528 | PAGE_ALIGN(size) >> PAGE_SHIFT, | |
1529 | 0, size, GFP_KERNEL); | |
1530 | } | |
1531 | ||
1532 | page = vmalloc_to_page(cpu_addr); | |
1533 | } else { | |
1534 | page = virt_to_page(cpu_addr); | |
06d60728 CH |
1535 | } |
1536 | ||
3fb3378b CH |
1537 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
1538 | if (!ret) | |
1539 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); | |
1540 | return ret; | |
06d60728 CH |
1541 | } |
1542 | ||
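/*
 * Usage sketch (hypothetical): exporters such as dma-buf implementations
 * obtain a scatterlist view of a coherent buffer with
 *
 *	struct sg_table sgt;
 *	int ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 *
 * For a remapped, non-contiguous allocation the table is built from the
 * underlying page array; otherwise it is a single whole-buffer entry.
 */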
158a6d3c YS |
1543 | static unsigned long iommu_dma_get_merge_boundary(struct device *dev) |
1544 | { | |
1545 | struct iommu_domain *domain = iommu_get_dma_domain(dev); | |
1546 | ||
1547 | return (1UL << __ffs(domain->pgsize_bitmap)) - 1; | |
1548 | } | |
1549 | ||
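/*
 * Callers see the value above via dma_get_merge_boundary(). A mask of
 * (smallest IOMMU granule - 1) tells e.g. the block layer that physically
 * discontiguous segments may still be merged when their offsets line up
 * within a granule, since the IOMMU can map them virtually contiguous.
 */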
6d9870b7 JG |
1550 | static size_t iommu_dma_opt_mapping_size(void) |
1551 | { | |
1552 | return iova_rcache_range(); | |
1553 | } | |
1554 | ||
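/*
 * Usage sketch (hypothetical): callers reach this via
 * dma_opt_mapping_size(), e.g. a SCSI host driver capping its transfer
 * size so IOVAs keep coming from the fast per-CPU rcaches:
 *
 *	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
 *				   dma_opt_mapping_size(dev) >> SECTOR_SHIFT);
 *
 * Mappings larger than the rcache range fall back to the slower global
 * IOVA allocator on both allocation and free.
 */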
06d60728 | 1555 | static const struct dma_map_ops iommu_dma_ops = { |
30280eee | 1556 | .flags = DMA_F_PCI_P2PDMA_SUPPORTED, |
06d60728 CH |
1557 | .alloc = iommu_dma_alloc, |
1558 | .free = iommu_dma_free, | |
efa70f2f CH |
1559 | .alloc_pages = dma_common_alloc_pages, |
1560 | .free_pages = dma_common_free_pages, | |
e817ee5f CH |
1561 | .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, |
1562 | .free_noncontiguous = iommu_dma_free_noncontiguous, | |
06d60728 CH |
1563 | .mmap = iommu_dma_mmap, |
1564 | .get_sgtable = iommu_dma_get_sgtable, | |
1565 | .map_page = iommu_dma_map_page, | |
1566 | .unmap_page = iommu_dma_unmap_page, | |
1567 | .map_sg = iommu_dma_map_sg, | |
1568 | .unmap_sg = iommu_dma_unmap_sg, | |
1569 | .sync_single_for_cpu = iommu_dma_sync_single_for_cpu, | |
1570 | .sync_single_for_device = iommu_dma_sync_single_for_device, | |
1571 | .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu, | |
1572 | .sync_sg_for_device = iommu_dma_sync_sg_for_device, | |
1573 | .map_resource = iommu_dma_map_resource, | |
1574 | .unmap_resource = iommu_dma_unmap_resource, | |
158a6d3c | 1575 | .get_merge_boundary = iommu_dma_get_merge_boundary, |
6d9870b7 | 1576 | .opt_mapping_size = iommu_dma_opt_mapping_size, |
06d60728 CH |
1577 | }; |
1578 | ||
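/*
 * These ops are not called directly: once dev->dma_ops points here, the
 * dispatch in kernel/dma/mapping.c routes the generic API to this file,
 * so dma_map_page() ends up in iommu_dma_map_page(), dma_map_sgtable()
 * in iommu_dma_map_sg(), and so on. DMA_F_PCI_P2PDMA_SUPPORTED
 * advertises that the sg path understands P2PDMA bus-address segments.
 */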
1579 | /* | |
1580 | * Plumb the DMA-API ops into a device behind an IOMMU: install iommu_dma_ops |
1581 | * if its default domain supports the DMA API, else keep the platform ops. |
1582 | */ | |
ac6d7046 | 1583 | void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) |
06d60728 CH |
1584 | { |
1585 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | |
1586 | ||
1587 | if (!domain) | |
1588 | goto out_err; | |
1589 | ||
1590 | /* | |
1591 | * The IOMMU core code allocates the default DMA domain, which the | |
1592 | * underlying IOMMU driver needs to support via the dma-iommu layer. | |
1593 | */ | |
bf3aed46 | 1594 | if (iommu_is_dma_domain(domain)) { |
ac6d7046 | 1595 | if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev)) |
06d60728 CH |
1596 | goto out_err; |
1597 | dev->dma_ops = &iommu_dma_ops; | |
1598 | } | |
1599 | ||
1600 | return; | |
1601 | out_err: | |
1602 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", | |
1603 | dev_name(dev)); | |
51f8cc9e | 1604 | } |
8ce4904b | 1605 | EXPORT_SYMBOL_GPL(iommu_setup_dma_ops); |
51f8cc9e | 1606 | |
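/*
 * Caller sketch (abridged from the arm64 glue; details vary by arch and
 * kernel version): the arch hook installs these ops once it knows
 * whether the device sits behind an IOMMU, roughly
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 *	}
 */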
44bb7e24 RM |
1607 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, |
1608 | phys_addr_t msi_addr, struct iommu_domain *domain) | |
1609 | { | |
1610 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | |
1611 | struct iommu_dma_msi_page *msi_page; | |
842fe519 | 1612 | dma_addr_t iova; |
44bb7e24 | 1613 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
fdbe574e | 1614 | size_t size = cookie_msi_granule(cookie); |
44bb7e24 | 1615 | |
fdbe574e | 1616 | msi_addr &= ~(phys_addr_t)(size - 1); |
44bb7e24 RM |
1617 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
1618 | if (msi_page->phys == msi_addr) | |
1619 | return msi_page; | |
1620 | ||
c1864790 | 1621 | msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); |
44bb7e24 RM |
1622 | if (!msi_page) |
1623 | return NULL; | |
1624 | ||
8af23fad RM |
1625 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
1626 | if (!iova) | |
a44e6657 | 1627 | goto out_free_page; |
44bb7e24 | 1628 | |
1369459b | 1629 | if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL)) |
8af23fad RM |
1630 | goto out_free_iova; |
1631 | ||
44bb7e24 | 1632 | INIT_LIST_HEAD(&msi_page->list); |
a44e6657 RM |
1633 | msi_page->phys = msi_addr; |
1634 | msi_page->iova = iova; | |
44bb7e24 RM |
1635 | list_add(&msi_page->list, &cookie->msi_page_list); |
1636 | return msi_page; | |
1637 | ||
8af23fad | 1638 | out_free_iova: |
2a2b8eaa | 1639 | iommu_dma_free_iova(cookie, iova, size, NULL); |
44bb7e24 RM |
1640 | out_free_page: |
1641 | kfree(msi_page); | |
1642 | return NULL; | |
1643 | } | |
1644 | ||
fa49364c RM |
1645 | /** |
1646 | * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain | |
1647 | * @desc: MSI descriptor, will store the MSI page | |
1648 | * @msi_addr: MSI target address to be mapped | |
1649 | * | |
1650 | * Return: 0 on success or negative error code if the mapping failed. | |
1651 | */ | |
ece6e6f0 | 1652 | int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) |
44bb7e24 | 1653 | { |
ece6e6f0 | 1654 | struct device *dev = msi_desc_to_dev(desc); |
44bb7e24 | 1655 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
44bb7e24 | 1656 | struct iommu_dma_msi_page *msi_page; |
c1864790 | 1657 | static DEFINE_MUTEX(msi_prepare_lock); /* see below */ |
44bb7e24 | 1658 | |
ece6e6f0 JG |
1659 | if (!domain || !domain->iova_cookie) { |
1660 | desc->iommu_cookie = NULL; | |
1661 | return 0; | |
1662 | } | |
44bb7e24 | 1663 | |
44bb7e24 | 1664 | /* |
c1864790 RM |
1665 | * In fact the whole prepare operation should already be serialised by |
1666 | * irq_domain_mutex further up the callchain, but that's pretty subtle | |
1667 | * on its own, so consider this locking as failsafe documentation... | |
44bb7e24 | 1668 | */ |
c1864790 | 1669 | mutex_lock(&msi_prepare_lock); |
44bb7e24 | 1670 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); |
c1864790 | 1671 | mutex_unlock(&msi_prepare_lock); |
44bb7e24 | 1672 | |
ece6e6f0 JG |
1673 | msi_desc_set_iommu_cookie(desc, msi_page); |
1674 | ||
1675 | if (!msi_page) | |
1676 | return -ENOMEM; | |
1677 | return 0; | |
1678 | } | |
1679 | ||
fa49364c RM |
1680 | /** |
1681 | * iommu_dma_compose_msi_msg() - Apply translation to an MSI message | |
1682 | * @desc: MSI descriptor prepared by iommu_dma_prepare_msi() | |
1683 | * @msg: MSI message containing target physical address | |
1684 | */ | |
1685 | void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) | |
ece6e6f0 JG |
1686 | { |
1687 | struct device *dev = msi_desc_to_dev(desc); | |
1688 | const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | |
1689 | const struct iommu_dma_msi_page *msi_page; | |
1690 | ||
1691 | msi_page = msi_desc_get_iommu_cookie(desc); | |
1692 | ||
1693 | if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) | |
1694 | return; | |
1695 | ||
1696 | msg->address_hi = upper_32_bits(msi_page->iova); | |
1697 | msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; | |
1698 | msg->address_lo += lower_32_bits(msi_page->iova); | |
44bb7e24 | 1699 | } |
06d60728 CH |
1700 | |
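/*
 * Flow sketch for the two MSI helpers above (hypothetical irqchip code):
 * iommu_dma_prepare_msi() runs at interrupt allocation time, where
 * sleeping and allocating are allowed; composing happens later, possibly
 * in atomic context, and must not fail:
 *
 *	ret = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	...
 *	msg.address_lo = lower_32_bits(doorbell_phys);
 *	msg.address_hi = upper_32_bits(doorbell_phys);
 *	iommu_dma_compose_msi_msg(desc, &msg);
 *
 * which is why the IOVA and page mapping are reserved up front in
 * prepare, and compose only rewrites the address.
 */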
1701 | static int iommu_dma_init(void) | |
1702 | { | |
a8e8af35 LJ |
1703 | if (is_kdump_kernel()) |
1704 | static_branch_enable(&iommu_deferred_attach_enabled); | |
1705 | ||
06d60728 | 1706 | return iova_cache_get(); |
44bb7e24 | 1707 | } |
06d60728 | 1708 | arch_initcall(iommu_dma_init); |