shmem: Add shmem_writeout()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 2 Apr 2025 14:59:59 +0000 (15:59 +0100)
committer: Christian Brauner <brauner@kernel.org>
Mon, 7 Apr 2025 07:36:49 +0000 (09:36 +0200)
This will be the replacement for shmem_writepage().

Signed-off-by: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Link: https://lore.kernel.org/r/20250402150005.2309458-6-willy@infradead.org
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
include/linux/shmem_fs.h
mm/shmem.c

index 0b273a7b9f01d59f9046e2d7edbb82bb03effeb0..5f03a39a26f716b81b17ac716c6d5e16be98bb94 100644 (file)
@@ -104,10 +104,11 @@ static inline bool shmem_mapping(struct address_space *mapping)
        return false;
 }
 #endif /* CONFIG_SHMEM */
-extern void shmem_unlock_mapping(struct address_space *mapping);
-extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+void shmem_unlock_mapping(struct address_space *mapping);
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask);
-extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 99327c30507c449a3c779a13b77e53582e8c8b61..7d377ceae035022a0169ae8c2a09f904e1fcaef3 100644 (file)
@@ -1536,12 +1536,20 @@ int shmem_unuse(unsigned int type)
        return error;
 }
 
-/*
- * Move the page from the page cache to the swap cache.
- */
 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 {
-       struct folio *folio = page_folio(page);
+       return shmem_writeout(page_folio(page), wbc);
+}
+
+/**
+ * shmem_writeout - Write the folio to swap
+ * @folio: The folio to write
+ * @wbc: How writeback is to be done
+ *
+ * Move the folio from the page cache to the swap cache.
+ */
+int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
+{
        struct address_space *mapping = folio->mapping;
        struct inode *inode = mapping->host;
        struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1586,9 +1594,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 try_split:
                /* Ensure the subpages are still dirty */
                folio_test_set_dirty(folio);
-               if (split_huge_page_to_list_to_order(page, wbc->list, 0))
+               if (split_folio_to_list(folio, wbc->list))
                        goto redirty;
-               folio = page_folio(page);
                folio_clear_dirty(folio);
        }
 
@@ -1660,6 +1667,7 @@ redirty:
        folio_unlock(folio);
        return 0;
 }
+EXPORT_SYMBOL_GPL(shmem_writeout);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)