/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

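/*
 * Pre-populate the cookie's MSI page list with 1:1 (iova == phys) entries
 * covering a hardware MSI region, so that iommu_dma_get_msi_page() can find
 * them later without allocating IOVA space.
 */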
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

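/*
 * Reserve IOVAs matching a PCI host bridge's MMIO windows (which peer
 * devices may decode directly) and the gaps between its inbound
 * "dma-ranges", so such addresses are never handed out as DMA addresses.
 */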
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

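/*
 * Carve the IOMMU driver's reserved regions (plus, for PCI devices, the host
 * bridge windows) out of the IOVA space, and seed the cookie's MSI page list
 * from any hardware MSI regions.
 */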
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

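/* Flush queue timeout callback: invalidate the entire IOTLB for the domain. */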
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

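/*
 * Allocate a run of IOVA space. An MSI cookie is a simple linear bump
 * allocator; a full IOVA cookie honours the device's DMA and bus masks and
 * prefers a 32-bit (SAC) address for PCI devices.
 */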
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

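/*
 * Map a granule-aligned region around @phys into the domain and return the
 * DMA address corresponding to @phys, or DMA_MAPPING_ERROR on failure.
 */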
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

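/*
 * Allocate @count pages for a buffer, opportunistically using the largest
 * orders permitted by @order_mask and falling back towards order 0, since
 * the buffer need not be physically contiguous.
 */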
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * __iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
static void __iommu_dma_free(struct device *dev, struct page **pages,
		size_t size, dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_MAPPING_ERROR;
}

/**
 * __iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

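/*
 * Cache maintenance helpers: no-ops for coherent devices, otherwise the
 * relevant arch_sync_dma_* routine is applied to the buffer or to each
 * scatterlist segment in turn.
 */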
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

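/*
 * dma_map_ops entry points for single pages: map through the IOMMU and, for
 * non-coherent devices, perform the CPU cache maintenance the streaming DMA
 * API requires (unless DMA_ATTR_SKIP_CPU_SYNC is set).
 */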
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, coherent, attrs),
			iommu_get_dma_domain(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

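/*
 * Coherent allocation: in atomic context fall back to a physically
 * contiguous buffer (or the atomic pool for non-coherent devices);
 * otherwise allocate either from CMA (DMA_ATTR_FORCE_CONTIGUOUS) or as
 * scattered pages remapped into a contiguous vmalloc area.
 */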
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	size = PAGE_ALIGN(size);
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = dma_alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				dma_free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
				get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_release_from_contiguous(dev, page,
					size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (addr) {
			if (!coherent)
				arch_dma_prep_coherent(page, iosize);
			memset(addr, 0, size);
		} else {
			__iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
					size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page **pages;

		pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					  handle);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			__iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from __iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (dma_in_atomic_pool(cpu_addr, size)) {
		__iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		dma_free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		__iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		__iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		__iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
		unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

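/*
 * mmap() a coherent buffer into userspace, picking the path matching
 * whichever of the allocation flavours above produced @cpu_addr.
 */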
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __iommu_dma_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __iommu_dma_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return __iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
		size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __iommu_dma_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __iommu_dma_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

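/*
 * Look up, or create and map, the MSI page covering @msi_addr for this
 * domain. Called under the cookie's msi_lock, potentially from atomic
 * context, hence the GFP_ATOMIC allocation.
 */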
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

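/*
 * Ensure the MSI doorbell at @msi_addr is mapped in the device's IOMMU
 * domain and stash the resulting page in the MSI descriptor, for
 * iommu_dma_compose_msi_msg() to translate the address later.
 */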
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);