nilfs2: convert nilfs_page_mkwrite() to use a folio
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 14 Nov 2023 08:44:27 +0000 (17:44 +0900)
Committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 11 Dec 2023 01:21:28 +0000 (17:21 -0800)
Using the new folio APIs saves seven hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231114084436.2755-12-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/nilfs2/file.c

index 740ce26d1e7657dd3b5f33da2bed271acd1d7f95..bec33b89a075858ebf289a95fa4c83dbf6e86103 100644 (file)
@@ -45,34 +45,36 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct page *page = vmf->page;
+       struct folio *folio = page_folio(vmf->page);
        struct inode *inode = file_inode(vma->vm_file);
        struct nilfs_transaction_info ti;
+       struct buffer_head *bh, *head;
        int ret = 0;
 
        if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
                return VM_FAULT_SIGBUS; /* -ENOSPC */
 
        sb_start_pagefault(inode->i_sb);
-       lock_page(page);
-       if (page->mapping != inode->i_mapping ||
-           page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
-               unlock_page(page);
+       folio_lock(folio);
+       if (folio->mapping != inode->i_mapping ||
+           folio_pos(folio) >= i_size_read(inode) ||
+           !folio_test_uptodate(folio)) {
+               folio_unlock(folio);
                ret = -EFAULT;  /* make the VM retry the fault */
                goto out;
        }
 
        /*
-        * check to see if the page is mapped already (no holes)
+        * check to see if the folio is mapped already (no holes)
         */
-       if (PageMappedToDisk(page))
+       if (folio_test_mappedtodisk(folio))
                goto mapped;
 
-       if (page_has_buffers(page)) {
-               struct buffer_head *bh, *head;
+       head = folio_buffers(folio);
+       if (head) {
                int fully_mapped = 1;
 
-               bh = head = page_buffers(page);
+               bh = head;
                do {
                        if (!buffer_mapped(bh)) {
                                fully_mapped = 0;
@@ -81,11 +83,11 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
                } while (bh = bh->b_this_page, bh != head);
 
                if (fully_mapped) {
-                       SetPageMappedToDisk(page);
+                       folio_set_mappedtodisk(folio);
                        goto mapped;
                }
        }
-       unlock_page(page);
+       folio_unlock(folio);
 
        /*
         * fill hole blocks
@@ -105,7 +107,7 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
        nilfs_transaction_commit(inode->i_sb);
 
  mapped:
-       wait_for_stable_page(page);
+       folio_wait_stable(folio);
  out:
        sb_end_pagefault(inode->i_sb);
        return vmf_fs_error(ret);