xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
author Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Fri, 25 Oct 2013 10:39:49 +0000
committer Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Fri, 25 Oct 2013 10:39:49 +0000
Introduce xen_dma_map_page, xen_dma_unmap_page,
xen_dma_sync_single_for_cpu and xen_dma_sync_single_for_device.
On x86 and ia64 they are empty stubs; on arm and arm64 they call the
corresponding dma_ops function of the underlying platform. On arm the
unmap and sync callbacks are invoked only if the underlying dma_ops
implement them, since some dma_ops (for example arm_coherent_dma_ops)
leave them unset.
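
For context, a minimal sketch (not part of this patch) of how a
swiotlb-xen style map/unmap path could use the new helpers;
example_map_page, example_unmap_page and xen_phys_to_bus() are
hypothetical stand-ins for the caller's own code:

/*
 * Illustrative sketch only -- not part of this patch.
 */
static dma_addr_t example_map_page(struct device *hwdev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	/* xen_phys_to_bus() stands in for the caller's own
	 * physical-to-bus address translation. */
	dma_addr_t dev_addr = xen_phys_to_bus(page_to_phys(page) + offset);

	/* On arm/arm64 this performs any cache maintenance needed by
	 * the platform dma_ops; on x86/ia64 it is a no-op. */
	xen_dma_map_page(hwdev, page, offset, size, dir, attrs);
	return dev_addr;
}

static void example_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
}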

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Changes in v9:
- make xen_dma_map_page return void and avoid calling page_to_phys.

arch/arm/include/asm/xen/page-coherent.h
arch/arm64/include/asm/xen/page-coherent.h
arch/ia64/include/asm/xen/page-coherent.h
arch/x86/include/asm/xen/page-coherent.h

diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index c4d843c300c7c76c10147143daf0842bd42b987d..1109017499e52f05e92ca7056831323f4ed330e9 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -19,4 +19,32 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
 }
 
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       if (__generic_dma_ops(hwdev)->unmap_page)
+               __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+               __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (__generic_dma_ops(hwdev)->sync_single_for_device)
+               __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 3b4f029b66608d11a231ad6d9dfd59d1d71f5c27..2820f1a6eebe0252d280e5d56e0a6bc60ac14182 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -19,4 +19,29 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
 }
 
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
index 37b929c117bf0dbb93fe16a6a09d33ee42e5dc6c..96e42f97fa1ff9bd6a2e6d81fcfa652e07c36625 100644
--- a/arch/ia64/include/asm/xen/page-coherent.h
+++ b/arch/ia64/include/asm/xen/page-coherent.h
@@ -21,4 +21,18 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
        free_pages((unsigned long) cpu_addr, get_order(size));
 }
 
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
 #endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index 31de2e07918d4bc4d2fc708df9bef2dfd18b9ac4..7f02fe4e2c7b1d5a71c5e9931f8800f948f65929 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -21,4 +21,18 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
        free_pages((unsigned long) cpu_addr, get_order(size));
 }
 
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
 #endif /* _ASM_X86_XEN_PAGE_COHERENT_H */