/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;
        spinlock_t                      msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return &cookie->iovad;
        return NULL;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                spin_lock_init(&cookie->msi_lock);
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

int iommu_dma_init(void)
{
        return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

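/*
 * Illustrative sketch (not part of this file): an IOMMU driver would
 * typically hook the cookie into its domain_alloc() callback along these
 * lines, with "my_domain", "my_alloc_domain" and "my_free_domain" standing
 * in for driver-specific code:
 *
 *      static struct iommu_domain *my_domain_alloc(unsigned type)
 *      {
 *              struct my_domain *dom = my_alloc_domain(type);
 *
 *              if (!dom)
 *                      return NULL;
 *              if (type == IOMMU_DOMAIN_DMA &&
 *                  iommu_get_dma_cookie(&dom->domain)) {
 *                      my_free_domain(dom);
 *                      return NULL;
 *              }
 *              return &dom->domain;
 *      }
 *
 * with the matching iommu_put_dma_cookie() call made from domain_free().
 */
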
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

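/*
 * Illustrative sketch (assumptions, not from this file): a caller owning an
 * IOMMU_DOMAIN_UNMANAGED domain and managing its own IOVA space might set
 * aside a hole at "msi_base" that it never hands out itself, then call:
 *
 *      ret = iommu_get_msi_cookie(domain, msi_base);
 *
 * after which iommu_dma_map_msi_msg() below hands out PAGE_SIZE slots
 * linearly from @base, one per distinct MSI doorbell address.
 */
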
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

static void iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM &&
                    resource_type(window->res) != IORESOURCE_IO)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn, end_pfn;
        bool pci = dev && dev_is_pci(dev);

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);
        end_pfn = (base + size - 1) >> order;

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
                end_pfn = min_t(unsigned long, end_pfn,
                                domain->geometry.aperture_end >> order);
        }
        /*
         * PCI devices may have larger DMA masks, but still prefer allocating
         * within a 32-bit mask to avoid DAC addressing. Such limitations don't
         * apply to the typical platform device, so for those we may as well
         * leave the cache limit at the top of their range to save an rb_last()
         * traversal on every allocation.
         */
        if (pci)
                end_pfn &= DMA_BIT_MASK(32) >> order;

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }
                /*
                 * If we have devices with different DMA masks, move the free
                 * area cache limit down for the benefit of the smaller one.
                 */
                iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
        } else {
                init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
                if (pci)
                        iova_reserve_pci_windows(to_pci_dev(dev), iovad);
        }
        return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);

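/*
 * Illustrative sketch (assumptions, not from this file): the arch DMA layer
 * typically wires a device up to its DMA domain roughly as follows, where
 * "dma_base" and "dma_size" come from the device's bus or firmware
 * description:
 *
 *      struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *      if (iommu_dma_init_domain(domain, dma_base, dma_size, dev))
 *              goto out_fallback;
 *
 * where out_fallback reverts to the non-IOMMU dma_ops. Reinitialising a live
 * domain with a compatible granule and base (e.g. a second device attaching
 * to a shared domain) succeeds; an incompatible range fails with -EFAULT.
 */
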
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}

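/*
 * For example (purely illustrative): a cache-coherent device performing a
 * DMA_TO_DEVICE transfer with no special attributes gets
 * IOMMU_CACHE | IOMMU_READ, while a non-coherent DMA_FROM_DEVICE mapping
 * with DMA_ATTR_PRIVILEGED set gets IOMMU_PRIV | IOMMU_WRITE.
 */
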
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
                dma_addr_t dma_limit, struct device *dev)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long length = iova_align(iovad, size) >> shift;
        struct iova *iova = NULL;

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
                                  true);
        /*
         * Enforce size-alignment to be safe - there could perhaps be an
         * attribute to control this per-device, or at least per-domain...
         */
        if (!iova)
                iova = alloc_iova(iovad, length, dma_limit >> shift, true);

        return iova;
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long pfn = dma_addr >> shift;
        struct iova *iova = find_iova(iovad, pfn);
        size_t size;

        if (WARN_ON(!iova))
                return;

        size = iova_size(iova) << shift;
        size -= iommu_unmap(domain, pfn << shift, size);
        /* ...and if we can't, then something is horribly, horribly wrong */
        WARN_ON(size > 0);
        __free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
                unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, array_size = count * sizeof(*pages);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        if (array_size <= PAGE_SIZE)
                pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
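                /*
                 * Illustrative example (not in the original source): with
                 * count == 5 and an order_mask permitting orders 0 and 2,
                 * the first pass attempts one order-2 (4-page) block with
                 * __GFP_NORETRY, leaving count == 1, and the next pass can
                 * then only take the order-0 path for the final page.
                 */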
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);

                        order_size = 1U << order;
                        page = alloc_pages((order_mask - order_size) ?
                                           gfp | __GFP_NORETRY : gfp, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        } else if (!split_huge_page(page)) {
                                break;
                        }
                        __free_pages(page, order);
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        *handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *              given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *         or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t dma_addr;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

        *handle = DMA_ERROR_CODE;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
        if (!pages)
                return NULL;

        iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        size = iova_align(iovad, size);
        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(prot & IOMMU_CACHE)) {
                struct sg_mapping_iter miter;
                /*
                 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
                 * sufficient here, so skip it by using the "wrong" direction.
                 */
                sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
                while (sg_miter_next(&miter))
                        flush_page(dev, miter.addr, page_to_phys(miter.page));
                sg_miter_stop(&miter);
        }

        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
                        < size)
                goto out_free_sg;

        *handle = dma_addr;
        sg_free_table(&sgt);
        return pages;

out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        __free_iova(iovad, iova);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}

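/*
 * Illustrative sketch (assumptions, not from this file): an arch dma_map_ops
 * implementation for a non-coherent device would typically wrap the above
 * roughly like so, with "arch_flush_page" standing in for whatever cache
 * maintenance routine the architecture provides:
 *
 *      static void arch_flush_page(struct device *dev, const void *virt,
 *                                  phys_addr_t phys)
 *      {
 *              ...clean/invalidate PAGE_SIZE bytes at virt/phys...
 *      }
 *
 *      pages = iommu_dma_alloc(dev, size, gfp, attrs,
 *                      dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs),
 *                      handle, arch_flush_page);
 *
 * and later tear the buffer down with iommu_dma_free(dev, pages, size, handle).
 */
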
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
        unsigned long uaddr = vma->vm_start;
        unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int ret = -ENXIO;

        for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
                ret = vm_insert_page(vma, uaddr, pages[i]);
                if (ret)
                        break;
                uaddr += PAGE_SIZE;
        }
        return ret;
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        dma_addr_t dma_addr;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        size_t iova_off = iova_offset(iovad, phys);
        size_t len = iova_align(iovad, size + iova_off);
        struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);

        if (!iova)
                return DMA_ERROR_CODE;

        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
                __free_iova(iovad, iova);
                return DMA_ERROR_CODE;
        }
        return dma_addr + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, int prot)
{
        return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (cur_len + s_length <= max_len)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}

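/*
 * Illustrative example of the merging above (not in the original source):
 * two 4KB input segments laid out back-to-back in the single IOVA
 * allocation, where the second starts on an IOVA page boundary but not on a
 * DMA segment boundary, come back to the caller as one 8KB DMA segment,
 * provided the device's max_seg_size allows it.
 */
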
/*
 * If mapping failed, then just restore the original list,
 * while making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_ERROR_CODE)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;
        }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t dma_addr;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
        __free_iova(iovad, iova);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}

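/*
 * Illustrative sketch (assumptions, not from this file): an arch map_sg
 * DMA op would typically perform any cache maintenance first and then call:
 *
 *      count = iommu_dma_map_sg(dev, sgl, nents,
 *                               dma_info_to_prot(dir, coherent, attrs));
 *
 * where "coherent" is however the architecture determines device coherency.
 * A return value of 0 means mapping failed and the scatterlist has been
 * restored to its original state; otherwise it is the number of coalesced
 * DMA segments to report back to the DMA API caller.
 */
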
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
        if (!msi_page)
                return NULL;

        msi_page->phys = msi_addr;
        if (iovad) {
                iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
                if (!iova)
                        goto out_free_page;
                msi_page->iova = iova_dma_addr(iovad, iova);
        } else {
                msi_page->iova = cookie->msi_iova;
                cookie->msi_iova += size;
        }

        if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
                goto out_free_iova;

        INIT_LIST_HEAD(&msi_page->list);
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        if (iovad)
                __free_iova(iovad, iova);
        else
                cookie->msi_iova -= size;
out_free_page:
        kfree(msi_page);
        return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
        phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
        unsigned long flags;

        if (!domain || !domain->iova_cookie)
                return;

        cookie = domain->iova_cookie;

        /*
         * We disable IRQs to rule out a possible inversion against
         * irq_desc_lock if, say, someone tries to retarget the affinity
         * of an MSI from within an IPI handler.
         */
        spin_lock_irqsave(&cookie->msi_lock, flags);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        spin_unlock_irqrestore(&cookie->msi_lock, flags);

        if (WARN_ON(!msi_page)) {
                /*
                 * We're called from a void callback, so the best we can do is
                 * 'fail' by filling the message with obviously bogus values.
                 * Since we got this far due to an IOMMU being present, it's
                 * not like the existing address would have worked anyway...
                 */
                msg->address_hi = ~0U;
                msg->address_lo = ~0U;
                msg->data = ~0U;
        } else {
                msg->address_hi = upper_32_bits(msi_page->iova);
                msg->address_lo &= cookie_msi_granule(cookie) - 1;
                msg->address_lo += lower_32_bits(msi_page->iova);
        }
}