swiotlb-xen: remove XEN_PFN_PHYS
drivers/xen/swiotlb-xen.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
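/*
 * Illustrative example (hypothetical frame numbers, not taken from a real
 * system): the guest may see pseudo-physical frame 0x10 while Xen has
 * backed it with machine frame 0x8a3c0.  A device only sees machine
 * addresses, so DMA must be programmed with the machine frame, and two
 * consecutive pseudo-physical frames may be backed by machine frames that
 * are nowhere near each other.  The helpers below perform exactly this
 * translation in both directions.
 */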

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

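/*
 * Translate a guest pseudo-physical address into a bus (machine) address a
 * device can use: look up the backing frame for the XEN_PAGE-sized frame
 * containing @paddr and re-apply the offset within that frame.
 * xen_bus_to_phys() below performs the inverse lookup.
 */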
static inline dma_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev, dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
	return xen_phys_to_bus(dev, virt_to_phys(address));
}

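/*
 * Check whether the XEN_PAGE-sized frames covering [p, p + size) are backed
 * by consecutive machine frames.  If they are not, a device expecting one
 * contiguous DMA region would scribble over unrelated memory, so the caller
 * has to bounce or exchange the buffer instead.
 */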
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

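/*
 * Report whether a bus address falls inside the bounce buffer this file set
 * up, i.e. whether it has to be unmapped/synced through swiotlb.
 */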
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

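/*
 * Exchange the pages backing the swiotlb pool for machine-contiguous memory,
 * one IO_TLB_SEGSIZE-sized chunk at a time.  Each chunk is first requested
 * below a narrow DMA address width; if the hypervisor cannot satisfy that,
 * the width is relaxed bit by bit up to MAX_DMA_BITS before giving up.
 */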
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
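
/*
 * Pick the number of swiotlb slabs: use the caller-supplied value if there
 * is one, otherwise default to a 64MB pool rounded up to IO_TLB_SEGSIZE.
 * Returns the resulting pool size in bytes.
 */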
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
			"You either: don't have the permissions, do not have"\
			" enough free memory under 4GB, or the hypervisor memory"\
			" is too fragmented!";
	default:
		break;
	}
	return "";
}
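
/*
 * Set up the Xen swiotlb pool: reuse an already-initialised IO TLB if one
 * exists, otherwise allocate one (from memblock early in boot, from the
 * page allocator later), exchange it with the hypervisor for
 * machine-contiguous, DMA-addressable memory via xen_swiotlb_fixup(), and
 * hand it to the core swiotlb code.  On failure the pool is halved and
 * retried a few times before giving up (panicking if this happens early
 * in boot).
 */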
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * IO TLB memory already allocated. Just use it.
	 */
	if (io_tlb_start != 0) {
		xen_io_tlb_start = phys_to_virt(io_tlb_start);
		goto end;
	}

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}

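/*
 * Allocate a coherent DMA buffer.  The pages come from the normal allocator
 * (via xen_alloc_coherent_pages()); if their machine addresses already fit
 * the device's coherent mask and are machine-contiguous they are used as-is,
 * otherwise they are exchanged with the hypervisor for a suitable contiguous
 * region and the page is marked XenRemapped so the free path can undo it.
 */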
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout. We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to the size actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(hwdev, phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

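/*
 * Free a buffer obtained from xen_swiotlb_alloc_coherent().  If the
 * allocation had been exchanged with the hypervisor (PageXenRemapped is set
 * and the region still looks sane), give the contiguous region back before
 * releasing the pages.
 */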
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* Do not use virt_to_phys because on ARM it doesn't return the
	 * physical address. */
	phys = xen_bus_to_phys(hwdev, dev_addr);

	/* Convert the size to the size actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
				     phys, size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_bus(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

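/*
 * The two sync_single helpers below keep the CPU's and the device's view of
 * a streaming mapping coherent: for_cpu performs any architecture cache
 * maintenance and copies bounce-buffer contents back before the CPU reads,
 * while for_device flushes CPU-side updates out (bouncing again if needed)
 * before the device accesses the buffer.
 */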
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev))
		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				       dir, attrs);

}

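/*
 * Map a scatter-gather list by mapping each segment individually through
 * xen_swiotlb_map_page().  If any segment fails, everything mapped so far is
 * torn down again and 0 is returned, as the DMA API requires.
 */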
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
						sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
						   sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};
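
/*
 * Illustrative only (not part of this file): once xen_swiotlb_dma_ops is
 * installed as a device's dma_map_ops, ordinary DMA API calls in drivers are
 * routed through the functions above, roughly:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...device performs DMA using 'handle'...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 *
 * which ends up in xen_swiotlb_map_page()/xen_swiotlb_unmap_page(), taking
 * care of the pseudo-physical to machine translation and any bouncing.
 */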