of: reserved_mem: Restructure call site for dma_contiguous_early_fixup()
author	Oreoluwa Babatunde <oreoluwa.babatunde@oss.qualcomm.com>
Wed, 6 Aug 2025 17:24:21 +0000 (10:24 -0700)
committer	Marek Szyprowski <m.szyprowski@samsung.com>
Mon, 11 Aug 2025 11:05:38 +0000 (13:05 +0200)
Restructure the call site for dma_contiguous_early_fixup() to
where the reserved_mem nodes are being parsed from the DT so that
dma_mmu_remap[] is populated before dma_contiguous_remap() is called.

Fixes: 8a6e02d0c00e ("of: reserved_mem: Restructure how the reserved memory regions are processed")
Signed-off-by: Oreoluwa Babatunde <oreoluwa.babatunde@oss.qualcomm.com>
Tested-by: William Zhang <william.zhang@broadcom.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20250806172421.2748302-1-oreoluwa.babatunde@oss.qualcomm.com
drivers/of/of_reserved_mem.c
include/linux/dma-map-ops.h
kernel/dma/contiguous.c

index 77016c0cc296e5c441e2ee0a0889d4a00c5c5a8e..7350b23cb7341889eba8a83c3334ef494ee70a99 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/memblock.h>
 #include <linux/kmemleak.h>
 #include <linux/cma.h>
+#include <linux/dma-map-ops.h>
 
 #include "of_private.h"
 
@@ -175,13 +176,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
                base = dt_mem_next_cell(dt_root_addr_cells, &prop);
                size = dt_mem_next_cell(dt_root_size_cells, &prop);
 
-               if (size &&
-                   early_init_dt_reserve_memory(base, size, nomap) == 0)
+               if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
+                       /* Architecture specific contiguous memory fixup. */
+                       if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+                           of_get_flat_dt_prop(node, "reusable", NULL))
+                               dma_contiguous_early_fixup(base, size);
                        pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
                                uname, &base, (unsigned long)(size / SZ_1M));
-               else
+               } else {
                        pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
                               uname, &base, (unsigned long)(size / SZ_1M));
+               }
 
                len -= t_len;
        }
@@ -472,7 +477,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
                       uname, (unsigned long)(size / SZ_1M));
                return -ENOMEM;
        }
-
+       /* Architecture specific contiguous memory fixup. */
+       if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+           of_get_flat_dt_prop(node, "reusable", NULL))
+               dma_contiguous_early_fixup(base, size);
        /* Save region in the reserved_mem array */
        fdt_reserved_mem_save_node(node, uname, base, size);
        return 0;
index f48e5fb88bd5dd346094bbf2ce1b79e5f5bfe1a6..332b80c42b6f3271ad91d1c0d0d0481a7ab232c8 100644 (file)
@@ -153,6 +153,9 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
 {
        __free_pages(page, get_order(size));
 }
+static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+}
 #endif /* CONFIG_DMA_CMA*/
 
 #ifdef CONFIG_DMA_DECLARE_COHERENT
index 67af8a55185d99f177dd6b4202f803fe31e6b303..d9b9dcba6ff7cf5904ac93b72c061fd59072c41b 100644 (file)
@@ -483,8 +483,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
                pr_err("Reserved memory: unable to setup CMA region\n");
                return err;
        }
-       /* Architecture specific contiguous memory fixup. */
-       dma_contiguous_early_fixup(rmem->base, rmem->size);
 
        if (default_cma)
                dma_contiguous_default_area = cma;