mm: move free_area_empty() to mm/internal.h
authorMike Rapoport (IBM) <rppt@kernel.org>
Sun, 26 Mar 2023 16:02:15 +0000 (19:02 +0300)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 18 Apr 2023 23:29:47 +0000 (16:29 -0700)
The free_area_empty() helper is only used inside mm/ so move it there to
reduce noise in include/linux/mmzone.h.

Link: https://lkml.kernel.org/r/20230326160215.2674531-1-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmzone.h
mm/internal.h

index 2d22e47dc1ebac5716f4ce6995f79b295f364eb7..337956748976ec23f47218f8319e66bf6eb1422c 100644 (file)
@@ -110,11 +110,6 @@ struct free_area {
        unsigned long           nr_free;
 };
 
-static inline bool free_area_empty(struct free_area *area, int migratetype)
-{
-       return list_empty(&area->free_list[migratetype]);
-}
-
 struct pglist_data;
 
 #ifdef CONFIG_NUMA
index 73b167b59cc533e7d07157ac4a763a17f6c9e896..92ddd3a05b74a8894842abb2624e4b2e76761188 100644 (file)
@@ -517,6 +517,11 @@ void init_cma_reserved_pageblock(struct page *page);
 int find_suitable_fallback(struct free_area *area, unsigned int order,
                        int migratetype, bool only_stealable, bool *can_steal);
 
+static inline bool free_area_empty(struct free_area *area, int migratetype)
+{
+       return list_empty(&area->free_list[migratetype]);
+}
+
 /*
  * These three helpers classifies VMAs for virtual memory accounting.
  */