drivers/iommu/dma-iommu.c
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

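/*
 * For illustration only (not part of this file): a minimal sketch of how an
 * IOMMU driver's domain_alloc callback might acquire the cookie. The type
 * "struct my_iommu_domain" with an embedded struct iommu_domain named
 * "domain" is hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_iommu_domain *my_dom = kzalloc(sizeof(*my_dom), GFP_KERNEL);
 *
 *		if (!my_dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&my_dom->domain)) {
 *			kfree(my_dom);
 *			return NULL;
 *		}
 *		return &my_dom->domain;
 *	}
 */
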
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

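/*
 * For illustration only (not part of this file): a caller which owns an
 * UNMANAGED domain and does its own IOVA management might set aside a window
 * of its IOVA space for MSI doorbells. The base address below is purely an
 * example value:
 *
 *	#define MY_MSI_IOVA_BASE	0x8000000
 *
 *	if (iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE))
 *		goto out_err;	// MSIs will not be remapped
 */
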
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

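/*
 * For illustration only (not part of this file): arch code typically pairs
 * the cookie and domain initialisation when setting up DMA ops for a device,
 * roughly along these lines (error handling elided; the window values are
 * example numbers only):
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *	u64 dma_base = 0, dma_size = 1ULL << 32;
 *
 *	if (iommu_dma_init_domain(domain, dma_base, dma_size, dev))
 *		pr_warn("failed to initialise DMA domain\n");
 */
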
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

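/*
 * For illustration only (not part of this file): for a cache-coherent device
 * writing to memory, dma_info_to_prot(DMA_FROM_DEVICE, true, 0) evaluates to
 * IOMMU_CACHE | IOMMU_WRITE, while a non-coherent DMA_TO_DEVICE mapping with
 * DMA_ATTR_PRIVILEGED yields IOMMU_PRIV | IOMMU_READ.
 */
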
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

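/*
 * For illustration only (not part of this file): with a 4KB IOVA granule, a
 * 12KB request gives iova_len = 3, which is below the range-cache limit and
 * is therefore rounded up to 4 granules before allocation; the extra granule
 * is wasted, but the allocation can later be recycled via the IOVA caches.
 */
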
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

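/*
 * For illustration only (not part of this file): a non-coherent arch backend
 * might wrap this roughly as follows, where "arch_flush_page" stands in for
 * whatever cache-maintenance helper that architecture provides:
 *
 *	static void arch_flush_page(struct device *dev, const void *virt,
 *				    phys_addr_t phys)
 *	{
 *		// e.g. clean/invalidate the cache lines covering this page
 *	}
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs,
 *				dma_info_to_prot(DMA_BIDIRECTIONAL, false, attrs),
 *				&dma_handle, arch_flush_page);
 */
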
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

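/*
 * For illustration only (not part of this file): a DMA ops .mmap callback
 * could hand off to this helper once it has looked up the page array for the
 * buffer; "my_find_buffer_pages" below is a hypothetical lookup:
 *
 *	struct page **pages = my_find_buffer_pages(cpu_addr);
 *
 *	if (!pages)
 *		return -ENXIO;
 *	return iommu_dma_mmap(pages, size, vma);
 */
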
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

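/*
 * For illustration only (not part of this file): callers (normally the arch
 * dma_map_ops glue) treat a zero return as failure and must walk only the
 * returned number of segments, not the original nents;
 * "program_hw_descriptor" below is a hypothetical device-specific helper:
 *
 *	count = iommu_dma_map_sg(dev, sgl, nents,
 *				 dma_info_to_prot(dir, coherent, attrs));
 *	if (!count)
 *		return 0;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 */
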
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}