// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual ones and vice-versa, and also
 * providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
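/*
 * As a purely hypothetical illustration: guest PFN 0x1000 might be backed
 * by MFN 0x8a321 while PFN 0x1001 is backed by MFN 0x2005f, so a bus
 * address can never be derived from a guest physical address by arithmetic
 * alone; every translation has to go through the PFN<->MFN lookup.
 */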

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

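/*
 * Address translation helpers: a guest physical address is split into a
 * Xen page frame number plus an offset; the frame number is translated to
 * the backing machine frame with pfn_to_bfn() and the offset is preserved.
 * The *_dma variants additionally apply the device's phys<->dma conversion.
 */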
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
	return xen_phys_to_dma(dev, virt_to_phys(address));
}

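/*
 * Return 1 if the machine frames backing [p, p + size) are not contiguous,
 * i.e. the buffer cannot be handed to a device as a single DMA range and
 * has to be bounced.
 */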
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

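/*
 * Does this DMA address fall within our own bounce buffer? Only pages that
 * belong to this domain are considered, see the comment below.
 */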
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

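/*
 * Exchange the guest pages backing the IO TLB for machine pages that are
 * contiguous and addressable within dma_bits, one IO_TLB_SEGSIZE segment
 * at a time; if Xen cannot satisfy a request the address width is widened
 * up to MAX_DMA_BITS before giving up.
 */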
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

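/*
 * Pick the IO TLB size: use the slab count handed in (normally whatever
 * swiotlb_nr_tbl() reports from the command line) or default to 64MB,
 * rounded to whole IO_TLB_SEGSIZE segments. Returns the size in bytes.
 */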
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		       "You either: don't have the permissions, do not have"\
		       " enough free memory under 4GB, or the hypervisor memory"\
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

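/*
 * Set up the Xen software IO TLB: reuse an already-allocated native SWIOTLB
 * if one exists, otherwise allocate one (from memblock at early boot, from
 * the page allocator later), make it machine-contiguous and below 4GB via
 * xen_swiotlb_fixup(), and register it with the core swiotlb code. On
 * failure the size is halved and the whole sequence retried a few times.
 */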
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * IO TLB memory already allocated. Just use it.
	 */
	if (io_tlb_start != 0) {
		xen_io_tlb_start = phys_to_virt(io_tlb_start);
		goto end;
	}

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
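
/*
 * Coherent allocations: get pages from the arch helper, then make sure the
 * backing machine memory is contiguous and within the device's coherent DMA
 * mask, exchanging it with Xen if necessary. Pages that were exchanged are
 * marked XenRemapped so that the free path can undo the exchange.
 */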
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout. We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the dma address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = dma_to_phys(hwdev, *dma_handle);
	dev_addr = xen_phys_to_dma(hwdev, phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		*dma_handle = phys_to_dma(hwdev, *dma_handle);
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

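/*
 * Undo xen_swiotlb_alloc_coherent(): if the pages had been exchanged for a
 * machine-contiguous region (marked PageXenRemapped above), hand that
 * region back to Xen before freeing the pages themselves.
 */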
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_dma_to_phys(hwdev, dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
				attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
				     phys, size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

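/*
 * Sync helpers: perform the cache maintenance non-coherent devices need
 * (locally-owned pages directly via arch_sync_dma_*, foreign pages via the
 * xen_dma_sync_* helpers) and, if the buffer was bounced, copy data between
 * the bounce buffer and the original pages.
 */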
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);

}

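/*
 * Map a scatter-gather list by mapping each segment in turn; if any segment
 * fails, the segments already mapped are unwound and 0 is returned.
 */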
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}

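/*
 * These are the DMA ops that arch code points devices at when the kernel is
 * running on Xen with swiotlb-xen enabled, so ordinary dma_map_*() and
 * dma_alloc_*() calls from drivers end up in the functions above.
 */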
const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};