// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
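
/*
 * Usage sketch (not part of this file): a typical IOMMU driver pairs the
 * cookie helpers in its domain lifecycle callbacks. The "foo" names below
 * are hypothetical; only iommu_get_dma_cookie()/iommu_put_dma_cookie() and
 * the IOMMU_DOMAIN_DMA check reflect the real API.
 *
 *	static struct iommu_domain *foo_domain_alloc(unsigned int type)
 *	{
 *		struct foo_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void foo_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_foo_domain(domain));
 *	}
 */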

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
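
/*
 * Usage sketch (hypothetical "foo" driver, with a made-up FOO_SW_MSI_BASE):
 * the helper slots straight into a driver's .get_resv_regions callback,
 * alongside any driver-specific regions it wants to report.
 *
 *	static void foo_get_resv_regions(struct device *dev,
 *					 struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(FOO_SW_MSI_BASE, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_NOEXEC,
 *						 IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */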

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
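
/*
 * Worked example (illustrative numbers, not from any particular IOMMU):
 * with domain->pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, __ffs() gives
 * order = 12, so the IOVA granule becomes 1UL << 12 = 4KB, and a @base of
 * 0x80000000 yields base_pfn = 0x80000. Passing @base = 0 instead would
 * clamp base_pfn to 1, reserving IOVA page 0 as an invalid address.
 */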

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
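
/*
 * For example, a cache-coherent device doing a device-to-memory transfer
 * with privileged access requested maps as:
 *
 *	dma_info_to_prot(DMA_FROM_DEVICE, true, DMA_ATTR_PRIVILEGED)
 *		== IOMMU_CACHE | IOMMU_PRIV | IOMMU_WRITE
 *
 * i.e. the device may write the pages but not read them, and the mapping
 * is marked coherent and privileged.
 */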

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
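
/*
 * Worked example of the rounding above: with a 4KB granule, a 20KB request
 * gives iova_len = 5, which is below the rcache limit and so is rounded up
 * to 8 pages (32KB). The extra 12KB of IOVA space is deliberately wasted so
 * that the allocation can later be recycled through the power-of-two-sized
 * IOVA range caches rather than the rbtree.
 */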

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
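
/*
 * Worked example of the offset handling above: mapping 0x80 bytes at
 * phys = 0x10000042 with a 4KB granule gives iova_off = 0x42, so the
 * granule-aligned region starting at 0x10000000 is mapped for one full
 * page and the caller gets back iova + 0x42, preserving the sub-page
 * offset. __iommu_dma_unmap() reverses exactly the same alignment.
 */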

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
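
/*
 * Illustration (assuming a 4KB IOVA granule and permissive device limits):
 * two fully-aligned 4KB entries mapped back-to-back in IOVA space come out
 * of __finalise_sg() as a single 8KB DMA segment, since the second entry
 * has no sub-page offset, does not start on a boundary-mask multiple, and
 * keeps the merged length within dma_get_max_seg_size().
 */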

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
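
/*
 * Worked example of the stash-and-align trick above (4KB granule): a
 * segment with s->offset = 0x1042 and s->length = 0x100 stashes
 * s_iova_off = 0x42 in sg_dma_address() and 0x100 in sg_dma_len(), then
 * presents the IOMMU driver with offset 0x1000 and length 0x1000 (one
 * aligned granule). __finalise_sg() later adds the 0x42 back into both
 * s->offset and the reported DMA address, and __invalidate_sg() performs
 * the same restoration on the failure path.
 */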

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}
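
/*
 * Usage sketch: the architecture's DMA setup hook is expected to call this
 * once the device's IOMMU topology is known. On arm64, for instance, the
 * call sits in arch_setup_dma_ops(), roughly as below (simplified from
 * arch/arm64/mm/dma-mapping.c):
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */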

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
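
/*
 * Usage sketch: an MSI irqchip driver calls the pair above around message
 * generation, so the address programmed into the device is the IOVA of the
 * doorbell rather than its physical address. Simplified (and elided) from
 * the GICv3 ITS driver, drivers/irqchip/irq-gic-v3-its.c:
 *
 *	// at MSI allocation time, with the doorbell's physical address
 *	err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
 *
 *	// when composing the message for the device
 *	static void its_irq_compose_msi_msg(struct irq_data *d,
 *					    struct msi_msg *msg)
 *	{
 *		...
 *		msg->address_lo = lower_32_bits(addr);
 *		msg->address_hi = upper_32_bits(addr);
 *		msg->data = its_get_event_id(d);
 *
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *	}
 */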

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);