mm: change to return bool for isolate_movable_page()
Author:     Baolin Wang <baolin.wang@linux.alibaba.com>
AuthorDate: Wed, 15 Feb 2023 10:39:37 +0000 (18:39 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 20 Feb 2023 20:46:17 +0000 (12:46 -0800)
Now isolate_movable_page() can only return 0 or -EBUSY, and no caller
cares about the error value, so convert isolate_movable_page() to return
a boolean.  This makes the call sites clearer when checking the movable
page isolation state.

No functional changes intended.
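
As an illustration only, a minimal caller-side sketch of the old versus
new convention (page, mode and the isolated flag below are placeholders,
not code from this patch):

        /* Before: int return, 0 meant success, -EBUSY meant failure */
        if (!isolate_movable_page(page, ISOLATE_UNEVICTABLE))
                isolated = true;        /* returned 0: page was isolated */

        /* After: bool return, true means the page was isolated */
        if (isolate_movable_page(page, ISOLATE_UNEVICTABLE))
                isolated = true;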

[akpm@linux-foundation.org: remove unneeded comment, per Matthew]
Link: https://lkml.kernel.org/r/cb877f73f4fff8d309611082ec740a7065b1ade0.1676424378.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/migrate.h
mm/compaction.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index c88b96b48be706b0980e3f99433996e62a31fde8..6b252f519c868cfb2b91d3bb830af9e1e1108e17 100644
@@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                unsigned long private, enum migrate_mode mode, int reason,
                unsigned int *ret_succeeded);
 extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                struct folio *dst, struct folio *src);
@@ -92,8 +92,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 static inline struct page *alloc_migration_target(struct page *page,
                unsigned long private)
        { return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
-       { return -EBUSY; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+       { return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct folio *dst, struct folio *src)
diff --git a/mm/compaction.c b/mm/compaction.c
index d73578af44cc1a572980df136cbc7fc296e99685..ad7409f70519048ecf3505240f75feb4346be194 100644
@@ -976,7 +976,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                        locked = NULL;
                                }
 
-                               if (!isolate_movable_page(page, mode))
+                               if (isolate_movable_page(page, mode))
                                        goto isolate_success;
                        }
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8604753bc6444999d197c20d0c3d413098fa2ff8..a1ede7bdce95e89adae2bb9664a54dce708fc419 100644
@@ -2515,8 +2515,8 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
                if (lru)
                        isolated = isolate_lru_page(page);
                else
-                       isolated = !isolate_movable_page(page,
-                                                        ISOLATE_UNEVICTABLE);
+                       isolated = isolate_movable_page(page,
+                                                       ISOLATE_UNEVICTABLE);
 
                if (isolated) {
                        list_add(&page->lru, pagelist);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 5fc2dcf4e3abe8f36660e895d87b5fd9d92fda4d..5f73fd894b897fa5c76e3b94aae8f658b102d5d3 100644
@@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                 * We can skip free pages. And we can deal with pages on
                 * LRU and non-lru movable pages.
                 */
-               if (PageLRU(page)) {
+               if (PageLRU(page))
                        isolated = isolate_lru_page(page);
-                       ret = isolated ? 0 : -EBUSY;
-               } else
-                       ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-               if (!ret) { /* Success */
+               else
+                       isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+               if (isolated) {
                        list_add_tail(&page->lru, &source);
                        if (!__PageMovable(page))
                                inc_node_page_state(page, NR_ISOLATED_ANON +
                                                    page_is_file_lru(page));
 
                } else {
+                       ret = -EBUSY;
                        if (__ratelimit(&migrate_rs)) {
                                pr_warn("failed to isolate pfn %lx\n", pfn);
                                dump_page(page, "isolation failed");
diff --git a/mm/migrate.c b/mm/migrate.c
index 2db546a0618cdd0494ecf0c49768e7c40d7fcee4..9a101c7bb8fffc19f14bc6f02a60b080d561a723 100644
@@ -58,7 +58,7 @@
 
 #include "internal.h"
 
-int isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 {
        struct folio *folio = folio_get_nontail_page(page);
        const struct movable_operations *mops;
@@ -119,14 +119,14 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
        folio_set_isolated(folio);
        folio_unlock(folio);
 
-       return 0;
+       return true;
 
 out_no_isolated:
        folio_unlock(folio);
 out_putfolio:
        folio_put(folio);
 out:
-       return -EBUSY;
+       return false;
 }
 
 static void putback_movable_folio(struct folio *folio)