book3s64/hash: Refactor hash__kernel_map_pages() function
author	Ritesh Harjani (IBM) <ritesh.list@gmail.com>
	Fri, 18 Oct 2024 17:29:47 +0000 (22:59 +0530)
committer	Michael Ellerman <mpe@ellerman.id.au>
	Wed, 23 Oct 2024 07:53:19 +0000 (18:53 +1100)
Refactor the hash__kernel_map_pages() function so that it calls
hash_debug_pagealloc_map_pages(). This will be useful when kfence
support is added later.

No functional changes in this patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/0cb8ddcccdcf61ea06ab4d92aacd770c16cc0f2c.1729271995.git.ritesh.list@gmail.com
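
As a hedged illustration only (not part of this patch): once the mapping
logic lives in hash_debug_pagealloc_map_pages(), the hash__kernel_map_pages()
wrapper has an obvious place to also dispatch kfence-managed pages, which is
the direction the commit message hints at. The hash_kfence_map_pages() helper
below is hypothetical and shown purely as a sketch; is_kfence_address() and
page_address() are existing kernel helpers from <linux/kfence.h> and
<linux/mm.h>.

	#include <linux/kfence.h>
	#include <linux/mm.h>

	/*
	 * Sketch only: hash_kfence_map_pages() does not exist in this patch;
	 * it stands in for whatever kfence-specific mapping helper a later
	 * patch might introduce.
	 */
	int hash__kernel_map_pages(struct page *page, int numpages, int enable)
	{
		void *vaddr = page_address(page);

		/* Route kfence pool pages to the (hypothetical) kfence path. */
		if (is_kfence_address(vaddr))
			return hash_kfence_map_pages(page, numpages, enable);

		/* Otherwise fall back to the debug_pagealloc path from this patch. */
		return hash_debug_pagealloc_map_pages(page, numpages, enable);
	}
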
arch/powerpc/mm/book3s64/hash_utils.c

index 030c120d139950325c46e2076d1aacfe8a5a4dfa..da9b089c8e8beb8f4b60a577b82c476d2aa7298d 100644 (file)
@@ -349,7 +349,8 @@ static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
                linear_map_hash_slots[paddr >> PAGE_SHIFT] = slot | 0x80;
 }
 
-int hash__kernel_map_pages(struct page *page, int numpages, int enable)
+static int hash_debug_pagealloc_map_pages(struct page *page, int numpages,
+                                         int enable)
 {
        unsigned long flags, vaddr, lmi;
        int i;
@@ -368,6 +369,12 @@ int hash__kernel_map_pages(struct page *page, int numpages, int enable)
        local_irq_restore(flags);
        return 0;
 }
+
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       return hash_debug_pagealloc_map_pages(page, numpages, enable);
+}
+
 #else /* CONFIG_DEBUG_PAGEALLOC */
 int hash__kernel_map_pages(struct page *page, int numpages,
                                         int enable)