iommu/dma: Clean up MSI IOVA allocation
drivers/iommu/dma-iommu.c
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
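
/*
 * Illustrative sketch (not part of this file): how an IOMMU driver might
 * pair the cookie helpers above in its domain_alloc/domain_free callbacks,
 * as the kernel-doc suggests. struct my_domain and the my_* names are
 * hypothetical.
 *
 *	struct my_domain {
 *		struct iommu_domain	domain;
 *		// driver-private state...
 *	};
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		struct my_domain *dom = container_of(domain, struct my_domain,
 *						     domain);
 *
 *		iommu_put_dma_cookie(domain);
 *		kfree(dom);
 *	}
 */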

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	struct pci_host_bridge *bridge;
	struct resource_entry *window;

	if (!dev_is_pci(dev))
		return;

	bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
	resource_list_for_each_entry(window, &bridge->windows) {
		struct iommu_resv_region *region;
		phys_addr_t start;
		size_t length;

		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		start = window->res->start - window->offset;
		length = window->res->end - window->res->start + 1;
		region = iommu_alloc_resv_region(start, length, 0,
				IOMMU_RESV_RESERVED);
		if (!region)
			return;

		list_add_tail(&region->list, list);
	}
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
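
/*
 * Illustrative sketch (not part of this file): a driver's .get_resv_regions
 * callback would typically add its own hardware-specific regions (e.g. a
 * software-managed MSI doorbell window) and then call the helper above for
 * the generic PCI host bridge reservations. my_get_resv_regions(),
 * MY_MSI_BASE and MY_MSI_SIZE are hypothetical.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
 *						 IOMMU_WRITE | IOMMU_NOEXEC |
 *						 IOMMU_MMIO, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */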

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}
	/*
	 * PCI devices may have larger DMA masks, but still prefer allocating
	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
	 * apply to the typical platform device, so for those we may as well
	 * leave the cache limit at the top of their range to save an rb_last()
	 * traversal on every allocation.
	 */
	if (dev && dev_is_pci(dev))
		end_pfn &= DMA_BIT_MASK(32) >> order;

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		/*
		 * If we have devices with different DMA masks, move the free
		 * area cache limit down for the benefit of the smaller one.
		 */
		iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
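
/*
 * Illustrative sketch (not part of this file): arch code typically calls
 * iommu_dma_init_domain() when wiring a device up to its DMA-managed domain,
 * deriving @base/@size from the bus DMA range before installing IOMMU-backed
 * dma_ops. my_setup_dma_ops() and my_iommu_dma_ops are hypothetical; the
 * real equivalent at this point in time lives in arch/arm64/mm/dma-mapping.c.
 *
 *	static void my_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 *	{
 *		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *		if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 *			return;
 *		if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *			return;
 *		dev->dma_ops = &my_iommu_dma_ops;
 *	}
 */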

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
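
/*
 * Worked example (not part of this file): a streaming mapping of a buffer
 * the device will only read, on a cache-coherent master, translates as
 *
 *	dma_info_to_prot(DMA_TO_DEVICE, true, 0)
 *		== IOMMU_READ | IOMMU_CACHE
 *
 * while a bidirectional mapping for a non-coherent master with
 * DMA_ATTR_PRIVILEGED set yields
 *
 *	dma_info_to_prot(DMA_BIDIRECTIONAL, false, DMA_ATTR_PRIVILEGED)
 *		== IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV
 */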

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len;
	struct iova *iova = NULL;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova(iovad, iova_len, DMA_BIT_MASK(32) >> shift,
				  true);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	if (!iova)
		iova = alloc_iova(iovad, iova_len, dma_limit >> shift, true);

	return (dma_addr_t)iova->pfn_lo << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iova *iova_rbnode;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova -= size;
		return;
	}

	iova_rbnode = find_iova(iovad, iova_pfn(iovad, iova));
	if (WARN_ON(!iova_rbnode))
		return;

	__free_iova(iovad, iova_rbnode);
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
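
/*
 * Illustrative sketch (not part of this file): an arch dma_map_ops .alloc
 * path for a non-coherent device might drive iommu_dma_alloc() roughly as
 * below, then remap the returned pages to get a CPU address.
 * my_flush_page() and my_dma_flush_area() stand in for the arch's cache
 * maintenance helpers and are hypothetical.
 *
 *	static void my_flush_page(struct device *dev, const void *virt,
 *				  phys_addr_t phys)
 *	{
 *		my_dma_flush_area(virt, PAGE_SIZE);
 *	}
 *
 *	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, false, attrs);
 *	struct page **pages;
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
 *				my_flush_page);
 *	if (!pages)
 *		return NULL;
 *	return vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, VM_MAP,
 *		    pgprot_writecombine(PAGE_KERNEL));
 */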

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */

int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);
	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_ERROR_CODE;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_ERROR_CODE;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
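
/*
 * Worked example (not part of this file): with a 4K IOVA granule, a single
 * scatterlist segment of offset 0x100 and length 0x1E00 is temporarily
 * rewritten by iommu_dma_map_sg() so that the IOMMU driver sees a
 * granule-aligned segment:
 *
 *	sg_dma_address(s) = 0x100;	// stashed sub-page offset
 *	sg_dma_len(s)     = 0x1E00;	// stashed original length
 *	s->offset         = 0x0;	// rounded down to the granule
 *	s->length         = 0x2000;	// rounded up to the granule
 *
 * Once the whole list has been mapped into one IOVA allocation starting at
 * iova, __finalise_sg() undoes the above and fills in the real DMA data:
 *
 *	s->offset         = 0x100;
 *	s->length         = 0x1E00;
 *	sg_dma_address(s) = iova + 0x100;
 *	sg_dma_len(s)     = 0x1E00;
 */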

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
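
/*
 * Illustrative sketch (not part of this file): an MSI controller driver
 * would typically call iommu_dma_map_msi_msg() at the end of its
 * .irq_compose_msi_msg callback, after filling in the physical doorbell
 * address, so that a device sitting behind an IOMMU is handed the remapped
 * IOVA instead. my_msi_compose_msg() and MY_DOORBELL_PHYS are hypothetical;
 * the GICv3 ITS driver is one real user of this pattern.
 *
 *	static void my_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(MY_DOORBELL_PHYS);
 *		msg->address_lo = lower_32_bits(MY_DOORBELL_PHYS);
 *		msg->data = d->hwirq;
 *
 *		iommu_dma_map_msi_msg(d->irq, msg);
 *	}
 */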