#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
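
/*
 * Foreign-page handlers, implemented out of line in the ARM Xen
 * support code. They perform the DMA operations for pages that are
 * grant-mapped from another domain and therefore cannot be handled
 * by the native dma_ops.
 */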
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
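
/*
 * Coherent allocations are satisfied from local (dom0) memory, so
 * the two helpers below can forward straight to the native dma_ops
 * without a local/foreign check.
 */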
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
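
/*
 * Map a page for DMA. "local" checks whether dev_addr falls within
 * the (possibly compound) Linux page: a compound page of order n
 * covers (1 << n) * XEN_PFN_PER_PAGE Xen-sized frames starting at
 * page_to_xen_pfn(page).
 */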
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. Thus, while a Linux page can span
	 * multiple Xen pages, it cannot contain a mix of local and
	 * foreign Xen pages: if the first xen_pfn equals the mfn, the
	 * page is local, otherwise it is a foreign page grant-mapped
	 * in dom0. If the page is local we can safely call the native
	 * dma_ops function, otherwise we call the Xen-specific one.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
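
/*
 * Unlike xen_dma_map_page, only the DMA handle is available here, so
 * the local/foreign decision is made with pfn_valid() instead of a
 * page-range check.
 */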
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);

	/*
	 * Dom0 is mapped 1:1. Thus, while a Linux page can span
	 * multiple Xen pages, it cannot contain a mix of local and
	 * foreign Xen pages. Because dom0 is mapped 1:1, calling
	 * pfn_valid on a foreign mfn will always return false. If the
	 * page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen-specific one.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
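
/*
 * The two sync helpers below use the same pfn_valid() test as
 * xen_dma_unmap_page to tell local pages (native dma_ops) from
 * foreign grant mappings (Xen-specific handling).
 */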
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */