mm/truncate: add folio_unmap_invalidate() helper
author    Jens Axboe <axboe@kernel.dk>
          Fri, 8 Nov 2024 15:21:09 +0000 (08:21 -0700)
committer Jens Axboe <axboe@kernel.dk>
          Fri, 20 Dec 2024 15:30:39 +0000 (08:30 -0700)
Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Embed the
old invalidate_complete_folio2() helper in there as well, as nobody else
calls it.

Use this new helper in invalidate_inode_pages2_range() rather than
duplicating the code there.

In preparation for using this elsewhere, have it take a gfp_t mask rather
than assuming GFP_KERNEL is the right choice. This bubbles down into the
embedded invalidate_complete_folio2() logic as well.
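
For illustration only, not part of this patch: a minimal sketch of how an
mm-internal caller might use the new helper, assuming the folio is looked
up and locked first. evict_one_folio() is a made-up name; the helper itself
is only declared in mm/internal.h.

  #include <linux/mm.h>
  #include <linux/pagemap.h>
  #include "internal.h"	/* declares folio_unmap_invalidate() */

  /* Hypothetical caller: lock the folio, wait out writeback, invalidate. */
  static int evict_one_folio(struct address_space *mapping, pgoff_t index)
  {
          struct folio *folio;
          int ret;

          /* filemap_lock_folio() returns the folio locked, or an ERR_PTR */
          folio = filemap_lock_folio(mapping, index);
          if (IS_ERR(folio))
                  return PTR_ERR(folio);

          folio_wait_writeback(folio);
          /* a negative return means the invalidate failed (e.g. -EBUSY) */
          ret = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);

          folio_unlock(folio);
          folio_put(folio);
          return ret;
  }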

Signed-off-by: Jens Axboe <axboe@kernel.dk>
mm/internal.h
mm/truncate.c

diff --git a/mm/internal.h b/mm/internal.h
index cb8d8e8e3ffa5f7fec34d8498a9aad1808232748..ed3c3690eb037be0005682b3b742153ddb5c0710 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -392,6 +392,8 @@ void unmap_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end,
                             struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+                          gfp_t gfp);
 
 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
                unsigned int order);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c304d2f0052d4336150a2c138d032b688e1717d..e2e115adfbc58a7cfa91a2d02a7855bbc9d38c87 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -525,6 +525,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+       if (!folio_test_dirty(folio))
+               return 0;
+       if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+               return 0;
+       return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
@@ -532,14 +541,26 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
-static int invalidate_complete_folio2(struct address_space *mapping,
-                                       struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+                          gfp_t gfp)
 {
-       if (folio->mapping != mapping)
-               return 0;
+       int ret;
+
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-       if (!filemap_release_folio(folio, GFP_KERNEL))
+       if (folio_test_dirty(folio))
                return 0;
+       if (folio_mapped(folio))
+               unmap_mapping_folio(folio);
+       BUG_ON(folio_mapped(folio));
+
+       ret = folio_launder(mapping, folio);
+       if (ret)
+               return ret;
+       if (folio->mapping != mapping)
+               return -EBUSY;
+       if (!filemap_release_folio(folio, gfp))
+               return -EBUSY;
 
        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
@@ -558,16 +579,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
 failed:
        xa_unlock_irq(&mapping->i_pages);
        spin_unlock(&mapping->host->i_lock);
-       return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-       if (!folio_test_dirty(folio))
-               return 0;
-       if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-               return 0;
-       return mapping->a_ops->launder_folio(folio);
+       return -EBUSY;
 }
 
 /**
@@ -631,16 +643,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                        }
                        VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);
-
-                       if (folio_mapped(folio))
-                               unmap_mapping_folio(folio);
-                       BUG_ON(folio_mapped(folio));
-
-                       ret2 = folio_launder(mapping, folio);
-                       if (ret2 == 0) {
-                               if (!invalidate_complete_folio2(mapping, folio))
-                                       ret2 = -EBUSY;
-                       }
+                       ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
                        if (ret2 < 0)
                                ret = ret2;
                        folio_unlock(folio);
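
The gfp argument is forwarded to filemap_release_folio(), so while
invalidate_inode_pages2_range() keeps passing GFP_KERNEL (as in the hunk
above), a later caller could restrict the mask. A hypothetical sketch, not
part of this patch; try_invalidate_noblock() is an invented name and the
caller is assumed to hold the folio lock:

  /* Hypothetical: disallow blocking reclaim in ->release_folio.
   * Needs "internal.h" for the folio_unmap_invalidate() declaration. */
  static int try_invalidate_noblock(struct address_space *mapping,
                                    struct folio *folio)
  {
          return folio_unmap_invalidate(mapping, folio,
                                        GFP_KERNEL & ~__GFP_DIRECT_RECLAIM);
  }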