// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

#include "dma-iommu.h"

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain	iovad;

			struct iova_fq __percpu *fq;	/* Flush queue */
			/* Number of TLB flushes that have been started */
			atomic64_t		fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t		fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list	fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t		fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
	struct mutex			mutex;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

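/*
 * Example (not part of this file): DAC addressing can be forced for all
 * PCI devices by booting with "iommu.forcedac=1" on the kernel command
 * line (kstrtobool() also accepts "y"/"on"). Without it, 32-bit SAC IOVAs
 * are preferred for PCI devices, see iommu_dma_alloc_iova() below.
 */
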
/* Number of entries per flush queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned int head, tail;
	spinlock_t lock;
};

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

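/*
 * Illustrative sketch (not part of this file): the flush queue is a
 * classic "one slot left empty" ring buffer, so with IOVA_FQ_SIZE == 256
 * at most 255 entries can be pending at once. A minimal stand-alone model
 * of the index arithmetic used by fq_ring_add()/fq_full():
 */
#if 0
#include <assert.h>

#define FQ_SIZE 256

struct fq_model { unsigned int head, tail; };

static int model_full(struct fq_model *fq)
{
	return (fq->tail + 1) % FQ_SIZE == fq->head;
}

static unsigned int model_add(struct fq_model *fq)	/* like fq_ring_add() */
{
	unsigned int idx = fq->tail;

	fq->tail = (idx + 1) % FQ_SIZE;
	return idx;
}

static void model_test(void)
{
	struct fq_model fq = { 0, 0 };
	unsigned int i;

	for (i = 0; i < FQ_SIZE - 1; i++)
		model_add(&fq);
	assert(model_full(&fq));	/* 255 queued entries fill the ring */
}
#endif
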
static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(cookie->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(cookie, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	fq = raw_cpu_ptr(cookie->fq);
	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}

static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	int cpu, idx;

	if (!cookie->fq)
		return;

	del_timer_sync(&cookie->fq_timer);
	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(cookie->fq);
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_fq __percpu *queue;
	int i, cpu;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt, 0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(queue, cpu);

		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);

		for (i = 0; i < IOVA_FQ_SIZE; i++)
			INIT_LIST_HEAD(&fq->entries[i].freelist);
	}

	cookie->fq = queue;

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

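/*
 * Note (not part of this file): iommu_dma_init_fq() is reached either from
 * iommu_dma_init_domain() below, for domains that start out as
 * IOMMU_DOMAIN_DMA_FQ (e.g. when booting with "iommu.strict=0"), or later
 * via sysfs by writing "DMA-FQ" to /sys/kernel/iommu_groups/<N>/type,
 * which is why the comment above relies on the group mutex for
 * serialisation.
 */
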
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	mutex_init(&domain->iova_cookie->mutex);
	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

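/*
 * Illustrative sketch (not part of this file): a caller owning an
 * unmanaged domain, such as a VFIO-style driver, reserves an IOVA window
 * for MSI doorbells and hands its base to iommu_get_msi_cookie(). The base
 * address below is an arbitrary example value.
 */
#if 0
static int example_prepare_msi(struct iommu_domain *domain)
{
	const dma_addr_t msi_base = 0x8000000;	/* example IOVA, domain-specific */

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	/* The caller must keep this window out of its own IOVA allocator */
	return iommu_get_msi_cookie(domain, msi_base);
}
#endif
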
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);

	return res_a->res->start > res_b->res->start;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* DMA ranges should be non-overlapping */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev)
{
	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	mutex_lock(&cookie->mutex);
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			ret = -EFAULT;
			goto done_unlock;
		}

		ret = 0;
		goto done_unlock;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		goto done_unlock;

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
		domain->type = IOMMU_DOMAIN_DMA;

	ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
	mutex_unlock(&cookie->mutex);
	return ret;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

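/*
 * Worked example (not part of this file): for a cache-coherent device,
 * dma_info_to_prot(DMA_TO_DEVICE, true, 0) yields IOMMU_CACHE | IOMMU_READ,
 * while dma_info_to_prot(DMA_FROM_DEVICE, false, DMA_ATTR_PRIVILEGED)
 * yields IOMMU_PRIV | IOMMU_WRITE.
 */
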
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

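/*
 * Illustrative sketch (not part of this file): drivers never call
 * __iommu_dma_map() directly; they use the generic DMA API, which routes
 * into this layer through the iommu_dma_ops installed for the device. The
 * buffer and length below are placeholders.
 */
#if 0
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... point the hardware at "dma" and wait for it to finish ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
#endif
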
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;
	ssize_t ret;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
	if (ret < 0 || ret < size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		if (!is_swiotlb_active(dev)) {
			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
			return DMA_MAPPING_ERROR;
		}

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

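/*
 * Worked example (not part of this file): with a 4KiB IOVA granule, a
 * buffer at phys 0x80001200 of size 0x300 gives
 * iova_offset(iovad, phys | size) != 0, so an untrusted device gets a
 * bounce buffer padded out to a whole granule; a buffer at 0x80001000 of
 * size 0x1000 is already granule-aligned and is mapped in place.
 */
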
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		dma_addr_t s_dma_addr = sg_dma_address(s);
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		if (sg_is_dma_bus_address(s)) {
			if (i > 0)
				cur = sg_next(cur);

			sg_dma_unmark_bus_address(s);
			sg_dma_address(cur) = s_dma_addr;
			sg_dma_len(cur) = s_length;
			sg_dma_mark_bus_address(cur);
			count++;
			cur_len = 0;
			continue;
		}

		s->offset += s_iova_off;
		s->length = s_length;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_is_dma_bus_address(s)) {
			sg_dma_unmark_bus_address(s);
		} else {
			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
				s->offset += sg_dma_address(s);
			if (sg_dma_len(s))
				s->length = sg_dma_len(s);
		}
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_swiotlb(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		if (is_pci_p2pdma_page(sg_page(s))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				/*
				 * iommu_map_sg() will skip this segment as
				 * it is marked as a bus address,
				 * __finalise_sg() will copy the dma address
				 * into the output segment.
				 */
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Mapping through host bridge should be
				 * mapped with regular IOVAs, thus we
				 * do nothing here and continue below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_restore_sg;
			}
		}

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	if (!iova_len)
		return __finalise_sg(dev, sg, nents, 0);

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM && ret != -EREMOTEIO)
		return -EINVAL;
	return ret;
}

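/*
 * Illustrative sketch (not part of this file): a driver-side view of the
 * scatterlist path above, using the generic DMA API. The two-element
 * table and buffers are placeholders.
 */
#if 0
static int example_map_sg(struct device *dev, void *a, void *b, size_t len)
{
	struct scatterlist sgl[2];
	struct scatterlist *sg;
	int i, nents;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (nents <= 0)
		return -ENOMEM;

	/* Segments may have been concatenated by __finalise_sg() */
	for_each_sg(sgl, sg, nents, i)
		dev_dbg(dev, "seg %d: %pad + %u\n", i,
			&sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
	return 0;
}
#endif
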
06d60728 CH |
1300 | static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
1301 | int nents, enum dma_data_direction dir, unsigned long attrs) | |
0db2e5d1 | 1302 | { |
30280eee | 1303 | dma_addr_t end = 0, start; |
842fe519 RM |
1304 | struct scatterlist *tmp; |
1305 | int i; | |
06d60728 | 1306 | |
2e727bff | 1307 | if (dev_use_swiotlb(dev)) { |
82612d66 TM |
1308 | iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs); |
1309 | return; | |
1310 | } | |
1311 | ||
ee9d4097 DS |
1312 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
1313 | iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); | |
1314 | ||
0db2e5d1 RM |
1315 | /* |
1316 | * The scatterlist segments are mapped into a single | |
30280eee LG |
1317 | * contiguous IOVA allocation, the start and end points |
1318 | * just have to be determined. | |
0db2e5d1 | 1319 | */ |
30280eee LG |
1320 | for_each_sg(sg, tmp, nents, i) { |
1321 | if (sg_is_dma_bus_address(tmp)) { | |
1322 | sg_dma_unmark_bus_address(tmp); | |
1323 | continue; | |
1324 | } | |
1325 | ||
842fe519 RM |
1326 | if (sg_dma_len(tmp) == 0) |
1327 | break; | |
30280eee LG |
1328 | |
1329 | start = sg_dma_address(tmp); | |
1330 | break; | |
842fe519 | 1331 | } |
30280eee LG |
1332 | |
1333 | nents -= i; | |
1334 | for_each_sg(tmp, tmp, nents, i) { | |
1335 | if (sg_is_dma_bus_address(tmp)) { | |
1336 | sg_dma_unmark_bus_address(tmp); | |
1337 | continue; | |
1338 | } | |
1339 | ||
842fe519 RM |
1340 | if (sg_dma_len(tmp) == 0) |
1341 | break; | |
30280eee LG |
1342 | |
1343 | end = sg_dma_address(tmp) + sg_dma_len(tmp); | |
842fe519 | 1344 | } |
30280eee LG |
1345 | |
1346 | if (end) | |
1347 | __iommu_dma_unmap(dev, start, end - start); | |
0db2e5d1 RM |
1348 | } |
1349 | ||
06d60728 | 1350 | static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, |
51f8cc9e RM |
1351 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
1352 | { | |
1353 | return __iommu_dma_map(dev, phys, size, | |
6e235020 TM |
1354 | dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, |
1355 | dma_get_mask(dev)); | |
51f8cc9e RM |
1356 | } |
1357 | ||
06d60728 | 1358 | static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
51f8cc9e RM |
1359 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
1360 | { | |
b61d271e | 1361 | __iommu_dma_unmap(dev, handle, size); |
51f8cc9e RM |
1362 | } |
1363 | ||
8553f6e6 | 1364 | static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) |
bcf4b9c4 RM |
1365 | { |
1366 | size_t alloc_size = PAGE_ALIGN(size); | |
1367 | int count = alloc_size >> PAGE_SHIFT; | |
1368 | struct page *page = NULL, **pages = NULL; | |
1369 | ||
bcf4b9c4 | 1370 | /* Non-coherent atomic allocation? Easy */ |
e6475eb0 | 1371 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
c84dc6e6 | 1372 | dma_free_from_pool(dev, cpu_addr, alloc_size)) |
bcf4b9c4 RM |
1373 | return; |
1374 | ||
f5ff79fd | 1375 | if (is_vmalloc_addr(cpu_addr)) { |
bcf4b9c4 RM |
1376 | /* |
1377 | * If it the address is remapped, then it's either non-coherent | |
1378 | * or highmem CMA, or an iommu_dma_alloc_remap() construction. | |
1379 | */ | |
5cf45379 | 1380 | pages = dma_common_find_pages(cpu_addr); |
bcf4b9c4 RM |
1381 | if (!pages) |
1382 | page = vmalloc_to_page(cpu_addr); | |
51231740 | 1383 | dma_common_free_remap(cpu_addr, alloc_size); |
bcf4b9c4 RM |
1384 | } else { |
1385 | /* Lowmem means a coherent atomic or CMA allocation */ | |
1386 | page = virt_to_page(cpu_addr); | |
1387 | } | |
1388 | ||
1389 | if (pages) | |
1390 | __iommu_dma_free_pages(pages, count); | |
591fcf3b NC |
1391 | if (page) |
1392 | dma_free_contiguous(dev, page, alloc_size); | |
bcf4b9c4 RM |
1393 | } |
1394 | ||
8553f6e6 RM |
1395 | static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, |
1396 | dma_addr_t handle, unsigned long attrs) | |
1397 | { | |
1398 | __iommu_dma_unmap(dev, handle, size); | |
1399 | __iommu_dma_free(dev, size, cpu_addr); | |
1400 | } | |
1401 | ||
ee1ef05d CH |
1402 | static void *iommu_dma_alloc_pages(struct device *dev, size_t size, |
1403 | struct page **pagep, gfp_t gfp, unsigned long attrs) | |
06d60728 CH |
1404 | { |
1405 | bool coherent = dev_is_dma_coherent(dev); | |
9ad5d6ed | 1406 | size_t alloc_size = PAGE_ALIGN(size); |
90ae409f | 1407 | int node = dev_to_node(dev); |
9a4ab94a | 1408 | struct page *page = NULL; |
9ad5d6ed | 1409 | void *cpu_addr; |
06d60728 | 1410 | |
591fcf3b | 1411 | page = dma_alloc_contiguous(dev, alloc_size, gfp); |
90ae409f CH |
1412 | if (!page) |
1413 | page = alloc_pages_node(node, gfp, get_order(alloc_size)); | |
072bebc0 RM |
1414 | if (!page) |
1415 | return NULL; | |
1416 | ||
f5ff79fd | 1417 | if (!coherent || PageHighMem(page)) { |
33dcb37c | 1418 | pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); |
072bebc0 | 1419 | |
9ad5d6ed | 1420 | cpu_addr = dma_common_contiguous_remap(page, alloc_size, |
51231740 | 1421 | prot, __builtin_return_address(0)); |
9ad5d6ed | 1422 | if (!cpu_addr) |
ee1ef05d | 1423 | goto out_free_pages; |
8680aa5a RM |
1424 | |
1425 | if (!coherent) | |
9ad5d6ed | 1426 | arch_dma_prep_coherent(page, size); |
8680aa5a | 1427 | } else { |
9ad5d6ed | 1428 | cpu_addr = page_address(page); |
8680aa5a | 1429 | } |
ee1ef05d CH |
1430 | |
1431 | *pagep = page; | |
9ad5d6ed RM |
1432 | memset(cpu_addr, 0, alloc_size); |
1433 | return cpu_addr; | |
072bebc0 | 1434 | out_free_pages: |
591fcf3b | 1435 | dma_free_contiguous(dev, page, alloc_size); |
072bebc0 | 1436 | return NULL; |
06d60728 CH |
1437 | } |
1438 | ||
ee1ef05d CH |
1439 | static void *iommu_dma_alloc(struct device *dev, size_t size, |
1440 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) | |
1441 | { | |
1442 | bool coherent = dev_is_dma_coherent(dev); | |
1443 | int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); | |
1444 | struct page *page = NULL; | |
1445 | void *cpu_addr; | |
1446 | ||
1447 | gfp |= __GFP_ZERO; | |
1448 | ||
f5ff79fd | 1449 | if (gfpflags_allow_blocking(gfp) && |
e8d39a90 CH |
1450 | !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { |
1451 | return iommu_dma_alloc_remap(dev, size, handle, gfp, | |
1452 | dma_pgprot(dev, PAGE_KERNEL, attrs), attrs); | |
1453 | } | |
ee1ef05d | 1454 | |
e6475eb0 CH |
1455 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
1456 | !gfpflags_allow_blocking(gfp) && !coherent) | |
9420139f CH |
1457 | page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, |
1458 | gfp, NULL); | |
ee1ef05d CH |
1459 | else |
1460 | cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); | |
1461 | if (!cpu_addr) | |
1462 | return NULL; | |
1463 | ||
6e235020 TM |
1464 | *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, |
1465 | dev->coherent_dma_mask); | |
ee1ef05d CH |
1466 | if (*handle == DMA_MAPPING_ERROR) { |
1467 | __iommu_dma_free(dev, size, cpu_addr); | |
1468 | return NULL; | |
1469 | } | |
1470 | ||
1471 | return cpu_addr; | |
1472 | } | |
1473 | ||
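/*
 * Usage sketch for the allocator above: drivers call dma_alloc_coherent()
 * (or dma_alloc_attrs()), and the gfp flags and attrs passed in select
 * between the remapped, atomic-pool and page-allocator branches. The
 * structure and names below are illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

struct example_ring {
	void *vaddr;
	dma_addr_t dma;
	size_t size;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring)
{
	ring->size = SZ_64K;
	ring->vaddr = dma_alloc_coherent(dev, ring->size, &ring->dma,
					 GFP_KERNEL);
	if (!ring->vaddr)
		return -ENOMEM;
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->vaddr, ring->dma);
}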
06d60728 CH |
1474 | static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
1475 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | |
1476 | unsigned long attrs) | |
1477 | { | |
1478 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | |
efd9f10b | 1479 | unsigned long pfn, off = vma->vm_pgoff; |
06d60728 CH |
1480 | int ret; |
1481 | ||
33dcb37c | 1482 | vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); |
06d60728 CH |
1483 | |
1484 | if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) | |
1485 | return ret; | |
1486 | ||
1487 | if (off >= nr_pages || vma_pages(vma) > nr_pages - off) | |
1488 | return -ENXIO; | |
1489 | ||
f5ff79fd | 1490 | if (is_vmalloc_addr(cpu_addr)) { |
5cf45379 | 1491 | struct page **pages = dma_common_find_pages(cpu_addr); |
06d60728 | 1492 | |
efd9f10b | 1493 | if (pages) |
71fe89ce | 1494 | return vm_map_pages(vma, pages, nr_pages); |
efd9f10b CH |
1495 | pfn = vmalloc_to_pfn(cpu_addr); |
1496 | } else { | |
1497 | pfn = page_to_pfn(virt_to_page(cpu_addr)); | |
06d60728 CH |
1498 | } |
1499 | ||
efd9f10b CH |
1500 | return remap_pfn_range(vma, vma->vm_start, pfn + off, |
1501 | vma->vm_end - vma->vm_start, | |
1502 | vma->vm_page_prot); | |
06d60728 CH |
1503 | } |
1504 | ||
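/*
 * Usage sketch for .mmap: a driver exports its coherent buffer to
 * userspace with dma_mmap_coherent(), which lands in iommu_dma_mmap()
 * above (vm_map_pages() for remapped buffers, remap_pfn_range()
 * otherwise). The helper name is illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_mmap_buffer(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, dma_addr_t dma, size_t size)
{
	return dma_mmap_coherent(dev, vma, cpu_addr, dma, size);
}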
06d60728 CH |
1505 | static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
1506 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | |
1507 | unsigned long attrs) | |
1508 | { | |
3fb3378b CH |
1509 | struct page *page; |
1510 | int ret; | |
06d60728 | 1511 | |
f5ff79fd | 1512 | if (is_vmalloc_addr(cpu_addr)) { |
5cf45379 | 1513 | struct page **pages = dma_common_find_pages(cpu_addr); |
06d60728 | 1514 | |
3fb3378b CH |
1515 | if (pages) { |
1516 | return sg_alloc_table_from_pages(sgt, pages, | |
1517 | PAGE_ALIGN(size) >> PAGE_SHIFT, | |
1518 | 0, size, GFP_KERNEL); | |
1519 | } | |
1520 | ||
1521 | page = vmalloc_to_page(cpu_addr); | |
1522 | } else { | |
1523 | page = virt_to_page(cpu_addr); | |
06d60728 CH |
1524 | } |
1525 | ||
3fb3378b CH |
1526 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
1527 | if (!ret) | |
1528 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); | |
1529 | return ret; | |
06d60728 CH |
1530 | } |
1531 | ||
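/*
 * Usage sketch for .get_sgtable: dma_get_sgtable() is the public entry
 * point, typically used to describe a coherent buffer for export (e.g.
 * dma-buf). The caller owns the resulting table and must free it with
 * sg_free_table(). Helper name is illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_export_sgt(struct device *dev, struct sg_table *sgt,
			      void *cpu_addr, dma_addr_t dma, size_t size)
{
	int ret;

	ret = dma_get_sgtable(dev, sgt, cpu_addr, dma, size);
	if (ret)
		return ret;

	/* ... map sgt for the importing device, then ... */
	sg_free_table(sgt);
	return 0;
}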
158a6d3c YS |
1532 | static unsigned long iommu_dma_get_merge_boundary(struct device *dev) |
1533 | { | |
1534 | struct iommu_domain *domain = iommu_get_dma_domain(dev); | |
1535 | ||
1536 | return (1UL << __ffs(domain->pgsize_bitmap)) - 1; | |
1537 | } | |
1538 | ||
6d9870b7 JG |
1539 | static size_t iommu_dma_opt_mapping_size(void) |
1540 | { | |
1541 | return iova_rcache_range(); | |
1542 | } | |
1543 | ||
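/*
 * Usage sketch for the two helpers above: both are reached through the
 * generic DMA API. The merge boundary (smallest IOMMU page size minus one)
 * tells callers such as the block layer which pages may be merged into a
 * single segment, while dma_opt_mapping_size() reports the largest mapping
 * size that still hits the IOVA rcaches; larger mappings unmap more
 * slowly. Helper name is illustrative.
 */
#include <linux/dma-mapping.h>

static void example_query_dma_limits(struct device *dev)
{
	unsigned long merge_mask = dma_get_merge_boundary(dev);
	size_t opt_size = dma_opt_mapping_size(dev);

	dev_info(dev, "merge boundary mask %#lx, optimal mapping size %zu\n",
		 merge_mask, opt_size);
}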
06d60728 | 1544 | static const struct dma_map_ops iommu_dma_ops = { |
30280eee | 1545 | .flags = DMA_F_PCI_P2PDMA_SUPPORTED, |
06d60728 CH |
1546 | .alloc = iommu_dma_alloc, |
1547 | .free = iommu_dma_free, | |
efa70f2f CH |
1548 | .alloc_pages = dma_common_alloc_pages, |
1549 | .free_pages = dma_common_free_pages, | |
e817ee5f CH |
1550 | .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, |
1551 | .free_noncontiguous = iommu_dma_free_noncontiguous, | |
06d60728 CH |
1552 | .mmap = iommu_dma_mmap, |
1553 | .get_sgtable = iommu_dma_get_sgtable, | |
1554 | .map_page = iommu_dma_map_page, | |
1555 | .unmap_page = iommu_dma_unmap_page, | |
1556 | .map_sg = iommu_dma_map_sg, | |
1557 | .unmap_sg = iommu_dma_unmap_sg, | |
1558 | .sync_single_for_cpu = iommu_dma_sync_single_for_cpu, | |
1559 | .sync_single_for_device = iommu_dma_sync_single_for_device, | |
1560 | .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu, | |
1561 | .sync_sg_for_device = iommu_dma_sync_sg_for_device, | |
1562 | .map_resource = iommu_dma_map_resource, | |
1563 | .unmap_resource = iommu_dma_unmap_resource, | |
158a6d3c | 1564 | .get_merge_boundary = iommu_dma_get_merge_boundary, |
6d9870b7 | 1565 | .opt_mapping_size = iommu_dma_opt_mapping_size, |
06d60728 CH |
1566 | }; |
1567 | ||
1568 | /* | |
1569 | * The IOMMU core code allocates the default DMA domain, which the underlying | |
1570 | * IOMMU driver needs to support via the dma-iommu layer. | |
1571 | */ | |
ac6d7046 | 1572 | void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) |
06d60728 CH |
1573 | { |
1574 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | |
1575 | ||
1576 | if (!domain) | |
1577 | goto out_err; | |
1578 | ||
1579 | /* | |
1580 | * The IOMMU core code allocates the default DMA domain, which the | |
1581 | * underlying IOMMU driver needs to support via the dma-iommu layer. | |
1582 | */ | |
bf3aed46 | 1583 | if (iommu_is_dma_domain(domain)) { |
ac6d7046 | 1584 | if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev)) |
06d60728 CH |
1585 | goto out_err; |
1586 | dev->dma_ops = &iommu_dma_ops; | |
1587 | } | |
1588 | ||
1589 | return; | |
1590 | out_err: | |
1591 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", | |
1592 | dev_name(dev)); | |
51f8cc9e | 1593 | } |
8ce4904b | 1594 | EXPORT_SYMBOL_GPL(iommu_setup_dma_ops); |
51f8cc9e | 1595 | |
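/*
 * Usage sketch: an IOMMU driver is expected to call iommu_setup_dma_ops()
 * once a device has been attached to its default DMA domain, typically
 * from its .probe_finalize callback. The callback below is illustrative,
 * not taken from a specific driver.
 */
#include <linux/iommu.h>

static void example_iommu_probe_finalize(struct device *dev)
{
	/* No aperture restriction beyond the device's own DMA masks */
	iommu_setup_dma_ops(dev, 0, U64_MAX);
}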
44bb7e24 RM |
1596 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, |
1597 | phys_addr_t msi_addr, struct iommu_domain *domain) | |
1598 | { | |
1599 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | |
1600 | struct iommu_dma_msi_page *msi_page; | |
842fe519 | 1601 | dma_addr_t iova; |
44bb7e24 | 1602 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
fdbe574e | 1603 | size_t size = cookie_msi_granule(cookie); |
44bb7e24 | 1604 | |
fdbe574e | 1605 | msi_addr &= ~(phys_addr_t)(size - 1); |
44bb7e24 RM |
1606 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
1607 | if (msi_page->phys == msi_addr) | |
1608 | return msi_page; | |
1609 | ||
c1864790 | 1610 | msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); |
44bb7e24 RM |
1611 | if (!msi_page) |
1612 | return NULL; | |
1613 | ||
8af23fad RM |
1614 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
1615 | if (!iova) | |
a44e6657 | 1616 | goto out_free_page; |
44bb7e24 | 1617 | |
8af23fad RM |
1618 | if (iommu_map(domain, iova, msi_addr, size, prot)) |
1619 | goto out_free_iova; | |
1620 | ||
44bb7e24 | 1621 | INIT_LIST_HEAD(&msi_page->list); |
a44e6657 RM |
1622 | msi_page->phys = msi_addr; |
1623 | msi_page->iova = iova; | |
44bb7e24 RM |
1624 | list_add(&msi_page->list, &cookie->msi_page_list); |
1625 | return msi_page; | |
1626 | ||
8af23fad | 1627 | out_free_iova: |
2a2b8eaa | 1628 | iommu_dma_free_iova(cookie, iova, size, NULL); |
44bb7e24 RM |
1629 | out_free_page: |
1630 | kfree(msi_page); | |
1631 | return NULL; | |
1632 | } | |
1633 | ||
fa49364c RM |
1634 | /** |
1635 | * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain | |
1636 | * @desc: MSI descriptor, will store the MSI page | |
1637 | * @msi_addr: MSI target address to be mapped | |
1638 | * | |
1639 | * Return: 0 on success or negative error code if the mapping failed. | |
1640 | */ | |
ece6e6f0 | 1641 | int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) |
44bb7e24 | 1642 | { |
ece6e6f0 | 1643 | struct device *dev = msi_desc_to_dev(desc); |
44bb7e24 | 1644 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
44bb7e24 | 1645 | struct iommu_dma_msi_page *msi_page; |
c1864790 | 1646 | static DEFINE_MUTEX(msi_prepare_lock); /* see below */ |
44bb7e24 | 1647 | |
ece6e6f0 JG |
1648 | if (!domain || !domain->iova_cookie) { |
1649 | desc->iommu_cookie = NULL; | |
1650 | return 0; | |
1651 | } | |
44bb7e24 | 1652 | |
44bb7e24 | 1653 | /* |
c1864790 RM |
1654 | * In fact the whole prepare operation should already be serialised by |
1655 | * irq_domain_mutex further up the callchain, but that's pretty subtle | |
1656 | * on its own, so consider this locking as failsafe documentation... | |
44bb7e24 | 1657 | */ |
c1864790 | 1658 | mutex_lock(&msi_prepare_lock); |
44bb7e24 | 1659 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); |
c1864790 | 1660 | mutex_unlock(&msi_prepare_lock); |
44bb7e24 | 1661 | |
ece6e6f0 JG |
1662 | msi_desc_set_iommu_cookie(desc, msi_page); |
1663 | ||
1664 | if (!msi_page) | |
1665 | return -ENOMEM; | |
1666 | return 0; | |
1667 | } | |
1668 | ||
fa49364c RM |
1669 | /** |
1670 | * iommu_dma_compose_msi_msg() - Apply translation to an MSI message | |
1671 | * @desc: MSI descriptor prepared by iommu_dma_prepare_msi() | |
1672 | * @msg: MSI message containing target physical address | |
1673 | */ | |
1674 | void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) | |
ece6e6f0 JG |
1675 | { |
1676 | struct device *dev = msi_desc_to_dev(desc); | |
1677 | const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | |
1678 | const struct iommu_dma_msi_page *msi_page; | |
1679 | ||
1680 | msi_page = msi_desc_get_iommu_cookie(desc); | |
1681 | ||
1682 | if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) | |
1683 | return; | |
1684 | ||
1685 | msg->address_hi = upper_32_bits(msi_page->iova); | |
1686 | msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; | |
1687 | msg->address_lo += lower_32_bits(msi_page->iova); | |
44bb7e24 | 1688 | } |
06d60728 CH |
1689 | |
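/*
 * Usage sketch for the two MSI hooks above, as an MSI controller driver
 * might use them: reserve the doorbell mapping at allocation time with
 * iommu_dma_prepare_msi(), then let iommu_dma_compose_msi_msg() rewrite
 * the address when the message is composed. Function names and the
 * doorbell handling below are illustrative.
 */
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/msi.h>

static int example_msi_prepare(struct msi_desc *desc, phys_addr_t doorbell_phys)
{
	/* Map (or reuse) an IOVA for the doorbell in the device's domain */
	return iommu_dma_prepare_msi(desc, doorbell_phys);
}

static void example_compose_msi_msg(struct irq_data *d, struct msi_msg *msg,
				    phys_addr_t doorbell_phys)
{
	msg->address_hi = upper_32_bits(doorbell_phys);
	msg->address_lo = lower_32_bits(doorbell_phys);
	msg->data = d->hwirq;

	/* Replace the physical doorbell address with its IOVA, if any */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}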
1690 | static int iommu_dma_init(void) | |
1691 | { | |
a8e8af35 LJ |
1692 | if (is_kdump_kernel()) |
1693 | static_branch_enable(&iommu_deferred_attach_enabled); | |
1694 | ||
06d60728 | 1695 | return iova_cache_get(); |
44bb7e24 | 1696 | } |
06d60728 | 1697 | arch_initcall(iommu_dma_init); |