// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa,
 * and also a mechanism for providing contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32-bit when dma_addr_t is 64-bit, leading to a loss of
 * information if the shift is done before casting to 64-bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

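/*
 * Worked example (illustrative, assuming XEN_PAGE_SHIFT == 12): a
 * physical address 0x1234 whose frame maps to bfn 0x100000 yields
 *
 *	dma = ((dma_addr_t)0x100000 << 12) | 0x234 = 0x100000234
 *
 * Had bfn been shifted before the cast, the result would have been
 * truncated on a 32-bit unsigned long, hence the cast-then-shift order.
 */
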
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

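/*
 * Check whether a physical range that is contiguous in pseudo-physical
 * (PFN) space is also contiguous in machine (BFN) space. Returns 1 as
 * soon as two neighbouring PFNs in [p, p + size) map to
 * non-neighbouring BFNs, i.e. the range cannot be programmed into a
 * device as a single DMA region.
 */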
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

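/*
 * Translate the bus address back to a frame local to this domain and
 * check whether it falls inside [xen_io_tlb_start, xen_io_tlb_end),
 * i.e. whether the buffer was bounced through our SWIOTLB.
 */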
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

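/*
 * Exchange the IO TLB buffer, one IO_TLB_SEGSIZE-slab segment at a
 * time, for memory that is machine-contiguous and addressable within
 * dma_bits, widening the mask one bit at a time up to MAX_DMA_BITS if
 * the hypervisor cannot satisfy the narrower request.
 *
 * Illustrative arithmetic (assuming 4 KiB pages, IO_TLB_SHIFT == 11 and
 * IO_TLB_SEGSIZE == 128): a segment is 128 << 11 = 256 KiB, so dma_bits
 * starts at get_order(256 KiB) + PAGE_SHIFT = 6 + 12 = 18 bits.
 */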
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

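/*
 * Pick the number of IO TLB slabs: honour a caller-requested count, or
 * default to 64 MB worth of slabs rounded up to a whole IO_TLB_SEGSIZE
 * segment. Returns the resulting table size in bytes.
 */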
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

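/*
 * Set up the Xen SWIOTLB: size the table, allocate it (from memblock
 * when early, from the page allocator otherwise), exchange it for
 * machine pages under 4GB via xen_swiotlb_fixup(), and hand it to the
 * swiotlb core. On failure the table is halved (down to a 2MB minimum)
 * and the whole sequence is retried up to three times.
 */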
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * IO TLB memory already allocated. Just use it.
	 */
	if (io_tlb_start != 0) {
		xen_io_tlb_start = phys_to_virt(io_tlb_start);
		goto end;
	}

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
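
/*
 * Allocate coherent memory for a device: get pages from
 * xen_alloc_coherent_pages(), and if the resulting machine range does
 * not fit the device's coherent DMA mask or straddles a Xen page
 * boundary, exchange it for a contiguous under-the-mask region with
 * the hypervisor before returning it zeroed.
 */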
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout. We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to the size actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

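/*
 * Free memory obtained from xen_swiotlb_alloc_coherent(). If the
 * region was exchanged with the hypervisor at allocation time (the
 * PageXenRemapped flag is set), give it back to Xen before freeing the
 * pages.
 */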
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to the size actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
				     size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size))) {
		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the CPU from the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

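/*
 * Note the ordering below: when syncing for the CPU, the architecture
 * cache maintenance (xen_dma_sync_for_cpu) runs before the bounce
 * buffer is copied back; when syncing for the device, the bounce
 * buffer is filled first and the cache maintenance
 * (xen_dma_sync_for_device) runs last.
 */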
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

	if (!dev_is_dma_coherent(dev))
		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);

	if (is_xen_swiotlb_buffer(dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
				   size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

	if (is_xen_swiotlb_buffer(dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				       dir, attrs);
}

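/*
 * Map a scatter/gather list: each entry is mapped individually through
 * xen_swiotlb_map_page(); on the first failure everything mapped so
 * far is torn down again and 0 is returned.
 */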
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
						sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
						   sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

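/*
 * The dma_map_ops instance wiring the handlers above into the DMA API;
 * mmap and get_sgtable fall through to the generic dma-mapping helpers.
 */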
const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};