fs/buffer: add folio_create_empty_buffers helper
author: Pankaj Raghav <p.raghav@samsung.com>
Mon, 17 Apr 2023 12:36:17 +0000 (14:36 +0200)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 21 Apr 2023 21:52:01 +0000 (14:52 -0700)
Folio version of create_empty_buffers().  This is required to convert
create_page_buffers() to folio_create_buffers() later in the series.

It removes several calls to compound_head(), since it works directly on the
folio, unlike create_empty_buffers().  Hence, create_empty_buffers() has
been modified to call folio_create_empty_buffers().

Link: https://lkml.kernel.org/r/20230417123618.22094-4-p.raghav@samsung.com
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/buffer.c
include/linux/buffer_head.h

index bbd4aa670262481df0eedc5fe0c225031cc5f58f..e6266bf7eeea3d5ae4b3cc41bab4eb936aec7d6a 100644 (file)
@@ -1594,18 +1594,17 @@ out:
 }
 EXPORT_SYMBOL(block_invalidate_folio);
 
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * block_dirty_folio() via private_lock.  try_to_free_buffers
- * is already excluded via the page lock.
+ * is already excluded via the folio lock.
  */
-void create_empty_buffers(struct page *page,
-                       unsigned long blocksize, unsigned long b_state)
+void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
+                               unsigned long b_state)
 {
        struct buffer_head *bh, *head, *tail;
 
-       head = alloc_page_buffers(page, blocksize, true);
+       head = folio_alloc_buffers(folio, blocksize, true);
        bh = head;
        do {
                bh->b_state |= b_state;
@@ -1614,19 +1613,26 @@ void create_empty_buffers(struct page *page,
        } while (bh);
        tail->b_this_page = head;
 
-       spin_lock(&page->mapping->private_lock);
-       if (PageUptodate(page) || PageDirty(page)) {
+       spin_lock(&folio->mapping->private_lock);
+       if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
                bh = head;
                do {
-                       if (PageDirty(page))
+                       if (folio_test_dirty(folio))
                                set_buffer_dirty(bh);
-                       if (PageUptodate(page))
+                       if (folio_test_uptodate(folio))
                                set_buffer_uptodate(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
-       attach_page_private(page, head);
-       spin_unlock(&page->mapping->private_lock);
+       folio_attach_private(folio, head);
+       spin_unlock(&folio->mapping->private_lock);
+}
+EXPORT_SYMBOL(folio_create_empty_buffers);
+
+void create_empty_buffers(struct page *page,
+                       unsigned long blocksize, unsigned long b_state)
+{
+       folio_create_empty_buffers(page_folio(page), blocksize, b_state);
 }
 EXPORT_SYMBOL(create_empty_buffers);
 
index 0b14eab41bd1f5a6305bebc9583ceaffec7f0a3b..1520793c72dad7e1b81d0b0d8f73626d4af88b11 100644 (file)
@@ -205,6 +205,8 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                bool retry);
 void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
+void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
+                               unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_async_write(struct buffer_head *bh, int uptodate);