From 3567258d281b5b515d5165ed23851d9f84087e7d Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Fri, 21 Nov 2014 11:05:39 +0000
Subject: [PATCH] xen/arm: use hypercall to flush caches in map_page

In xen_dma_map_page, if the page is a local page, call the native
map_page dma_ops. If the page is foreign, call __xen_dma_map_page, which
issues any required cache maintenance operations via hypercall.

The reason for doing this is that the native dma_ops map_page could
allocate buffers that need to be freed. If the page is foreign we don't
call the native unmap_page dma_ops function, which would result in a
memory leak.

Suggested-by: Catalin Marinas
Signed-off-by: Stefano Stabellini
Reviewed-by: Catalin Marinas
---
 arch/arm/include/asm/xen/page-coherent.h | 13 ++++++++++++-
 arch/arm/xen/mm32.c                      | 12 ++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index a309f42b411c..efd562412850 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -5,6 +5,9 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs);
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs);
@@ -32,7 +35,15 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     dma_addr_t dev_addr, unsigned long offset, size_t size,
 	     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+	bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
+	/* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise
+	 * is a foreign page grant-mapped in dom0. If the page is local we
+	 * can safely call the native dma_ops function, otherwise we call
+	 * the xen specific function. */
+	if (local)
+		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+	else
+		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
index 3ce9dc1efb0c..c86919bbea83 100644
--- a/arch/arm/xen/mm32.c
+++ b/arch/arm/xen/mm32.c
@@ -43,6 +43,18 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
 	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
 }
 
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
-- 
2.25.1
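
Background on the local-vs-foreign test used in xen_dma_map_page above: because dom0 on ARM is mapped 1:1, a local page has pfn == mfn, so the frame number derived from the bus address matches the page's own pfn, while a grant-mapped foreign page fails that comparison. The snippet below is a minimal, self-contained user-space sketch of that comparison only, not kernel code: mock_page, MOCK_PAGE_SHIFT, mock_pfn_down() and mock_page_to_pfn() are illustrative stand-ins for struct page, PAGE_SHIFT, PFN_DOWN() and page_to_pfn().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_PAGE_SHIFT 12				/* stand-in for PAGE_SHIFT (4 KiB pages) */
#define mock_pfn_down(addr) ((addr) >> MOCK_PAGE_SHIFT)	/* stand-in for PFN_DOWN() */

/* Stand-in for struct page: we only track the frame number backing the page. */
struct mock_page {
	uint64_t pfn;
};

static uint64_t mock_page_to_pfn(const struct mock_page *page)
{
	return page->pfn;				/* stand-in for page_to_pfn() */
}

/*
 * Mirrors the check added to xen_dma_map_page: under dom0's 1:1 mapping,
 * pfn == mfn for local pages, so the device (bus) address and the page's
 * pfn refer to the same frame; a foreign, grant-mapped page does not.
 */
static bool page_is_local(uint64_t dev_addr, const struct mock_page *page)
{
	return mock_pfn_down(dev_addr) == mock_page_to_pfn(page);
}

int main(void)
{
	struct mock_page dom0_page = { .pfn = 0x1234 };

	/* Local case: dev_addr falls in the same frame as the page's pfn. */
	uint64_t local_addr = (uint64_t)0x1234 << MOCK_PAGE_SHIFT;
	/* Foreign case: the grant mapping gives an unrelated machine frame. */
	uint64_t foreign_addr = (uint64_t)0x9abc << MOCK_PAGE_SHIFT;

	printf("local_addr   -> %s\n", page_is_local(local_addr, &dom0_page)
	       ? "native map_page" : "__xen_dma_map_page");
	printf("foreign_addr -> %s\n", page_is_local(foreign_addr, &dom0_page)
	       ? "native map_page" : "__xen_dma_map_page");
	return 0;
}

Running the sketch prints which path each address would take under that rule: the native dma_ops map_page for the local address, __xen_dma_map_page (cache maintenance via hypercall) for the foreign one.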