Revert "powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE() to save TCEs"
author     Frederic Barrat <fbarrat@linux.ibm.com>
           Wed, 26 May 2021 14:45:40 +0000 (16:45 +0200)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Tue, 1 Jun 2021 01:17:08 +0000 (11:17 +1000)
This reverts commit 3c0468d4451eb6b4f6604370639f163f9637a479.

That commit broke the alignment guarantees for the DMA address when
allocating coherent mappings, as described in
Documentation/core-api/dma-api-howto.rst.
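
For reference, dma-api-howto.rst guarantees that both the CPU virtual address
and the DMA address returned by dma_alloc_coherent() are aligned to the
smallest PAGE_SIZE order greater than or equal to the requested size. A
minimal, hypothetical consumer relying on that guarantee (the function and
buffer names are made up for illustration, not taken from any driver):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/sizes.h>

	static void *alloc_ring(struct device *dev, dma_addr_t *dma)
	{
		void *buf = dma_alloc_coherent(dev, SZ_64K, dma, GFP_KERNEL);

		/*
		 * Per the DMA API, a 64K allocation must come back with a
		 * 64K-aligned DMA address; drivers may assume the low bits
		 * are zero, which is exactly what the reverted change broke.
		 */
		if (buf)
			WARN_ON(*dma & (SZ_64K - 1));
		return buf;
	}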

It was also noticed by the Mellanox mlx5 driver:
[ 1515.763621] mlx5_core c002:01:00.0: mlx5_frag_buf_alloc_node:146:(pid 13402): unexpected map alignment: 0x0800000000c61000, page_shift=16
[ 1515.763635] mlx5_core c002:01:00.0: mlx5_cqwq_create:181:(pid 13402): mlx5_frag_buf_alloc_node() failed, -12
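
To illustrate, using the values from the log above: page_shift=16 means the
driver expected a 64K-aligned mapping, but the returned address is only
4K-aligned, hence the "unexpected map alignment" warning and the -12 (ENOMEM)
failure:

	0x0800000000c61000 & ((1UL << 16) - 1) == 0x1000	/* low 16 bits not zero */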

Fixes: 3c0468d4451e ("powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE() to save TCEs")
Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210526144540.117795-1-fbarrat@linux.ibm.com
arch/powerpc/kernel/iommu.c

index 57d6b85e9b964f1339f9656a893e8c3ba87ee545..2af89a5e379f2bd2e3d3e624a52615728fc27ba0 100644
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;
-       size_t size_io = size;
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        memset(ret, 0, size);
 
        /* Set up tces to cover the allocated range */
-       size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
-       nio_pages = size_io >> tbl->it_page_shift;
-       io_order = get_iommu_order(size_io, tbl);
+       nio_pages = size >> tbl->it_page_shift;
+       io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
 {
        if (tbl) {
-               size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
-               unsigned int nio_pages = size_io >> tbl->it_page_shift;
+               unsigned int nio_pages;
 
+               size = PAGE_ALIGN(size);
+               nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
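
For context, a sketch of the arithmetic the restored iommu_alloc_coherent()
path performs on a 64K-page kernel with a 4K IOMMU page size
(tbl->it_page_shift == 12); the sizes are illustrative, not taken from the
commit:

	size      = PAGE_ALIGN(0x1000);          /* -> 0x10000 (64K) */
	nio_pages = size >> tbl->it_page_shift;  /* -> 16 TCEs       */
	io_order  = get_iommu_order(size, tbl);  /* -> order 4       */

	/*
	 * iommu_alloc() then reserves 16 TCEs at order-4 alignment, so the
	 * DMA address comes back 64K-aligned, matching the PAGE_ALIGN'ed CPU
	 * buffer and the dma_alloc_coherent() guarantee.  With the reverted
	 * change, size_io = IOMMU_PAGE_ALIGN(0x1000, tbl) stayed at 0x1000,
	 * giving nio_pages = 1 and io_order = 0, i.e. a DMA address that is
	 * only guaranteed to be 4K-aligned.
	 */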