Btrfs: Add writepages support
author     Chris Mason <chris.mason@oracle.com>
           Thu, 1 Nov 2007 23:45:34 +0000 (19:45 -0400)
committer  Chris Mason <chris.mason@oracle.com>
           Thu, 25 Sep 2008 15:03:57 +0000 (11:03 -0400)
Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/inode.c
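With only a ->writepage operation, writeback sends each dirty page down as its own bio. This commit adds extent_writepages(), which hands __extent_writepage() to write_cache_pages() together with a small extent_page_data cursor, so physically contiguous pages written back in one pass can share a bio; any bio still open when write_cache_pages() returns is submitted afterwards. The sketch below shows that shape against the 2.6.23-era write_cache_pages() and bio APIs; the my_fs_* names are illustrative, and my_fs_write_one_page() is a hypothetical stand-in for submit_extent_page() (declared only), with locking, page-state handling and error paths omitted.

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* per-writeback-call cursor, analogous to the struct extent_page_data added below */
struct my_fs_write_state {
	struct bio *bio;	/* bio currently being filled, or NULL */
};

/*
 * Hypothetical per-page helper standing in for submit_extent_page(): it maps
 * the page and either appends it to *bio_ret or submits *bio_ret and opens a
 * new bio.  Declared here only to keep the sketch compilable.
 */
int my_fs_write_one_page(struct page *page, struct writeback_control *wbc,
			 struct bio **bio_ret);

/* callback invoked by write_cache_pages() for each dirty page in range */
static int my_fs_writepage_cb(struct page *page,
			      struct writeback_control *wbc, void *data)
{
	struct my_fs_write_state *ws = data;

	return my_fs_write_one_page(page, wbc, &ws->bio);
}

static int my_fs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct my_fs_write_state ws = { .bio = NULL };
	int ret;

	ret = write_cache_pages(mapping, wbc, my_fs_writepage_cb, &ws);
	if (ws.bio)		/* flush the last, partially filled bio */
		submit_bio(WRITE, ws.bio);
	return ret;
}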

diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index ff8881fb56d6bf611ed805a26fe3fa728682d446..b0677c84bb751fb7a0838860117fe74c61274d5c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -10,6 +10,7 @@
 #include <linux/blkdev.h>
 #include <linux/swap.h>
 #include <linux/version.h>
+#include <linux/writeback.h>
 #include "extent_map.h"
 
 /* temporary define until extent_map moves out of btrfs */
@@ -35,6 +36,12 @@ struct tree_entry {
        struct rb_node rb_node;
 };
 
+struct extent_page_data {
+       struct bio *bio;
+       struct extent_map_tree *tree;
+       get_extent_t *get_extent;
+};
+
 void __init extent_map_init(void)
 {
        extent_map_cache = btrfs_cache_create("extent_map",
@@ -1460,40 +1467,76 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 #endif
 }
 
-static int submit_extent_page(int rw, struct extent_map_tree *tree,
-                             struct page *page, sector_t sector,
-                             size_t size, unsigned long offset,
-                             struct block_device *bdev,
-                             bio_end_io_t end_io_func)
+static struct bio *
+extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+                gfp_t gfp_flags)
 {
        struct bio *bio;
-       int ret = 0;
 
-       bio = bio_alloc(GFP_NOIO, 1);
+       bio = bio_alloc(gfp_flags, nr_vecs);
 
-       bio->bi_sector = sector;
-       bio->bi_bdev = bdev;
-       bio->bi_io_vec[0].bv_page = page;
-       bio->bi_io_vec[0].bv_len = size;
-       bio->bi_io_vec[0].bv_offset = offset;
-
-       bio->bi_vcnt = 1;
-       bio->bi_idx = 0;
-       bio->bi_size = size;
+       if (bio == NULL && (current->flags & PF_MEMALLOC)) {
+               while (!bio && (nr_vecs /= 2))
+                       bio = bio_alloc(gfp_flags, nr_vecs);
+       }
 
-       bio->bi_end_io = end_io_func;
-       bio->bi_private = tree;
+       if (bio) {
+               bio->bi_bdev = bdev;
+               bio->bi_sector = first_sector;
+       }
+       return bio;
+}
 
+static int submit_one_bio(int rw, struct bio *bio)
+{
+       int ret = 0;
        bio_get(bio);
        submit_bio(rw, bio);
-
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
-
        bio_put(bio);
        return ret;
 }
 
+static int submit_extent_page(int rw, struct extent_map_tree *tree,
+                             struct page *page, sector_t sector,
+                             size_t size, unsigned long offset,
+                             struct block_device *bdev,
+                             struct bio **bio_ret,
+                             int max_pages,
+                             bio_end_io_t end_io_func)
+{
+       int ret = 0;
+       struct bio *bio;
+       int nr;
+
+       if (bio_ret && *bio_ret) {
+               bio = *bio_ret;
+               if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
+                   bio_add_page(bio, page, size, offset) < size) {
+                       ret = submit_one_bio(rw, bio);
+                       bio = NULL;
+               } else {
+                       return 0;
+               }
+       }
+       nr = min(max_pages, bio_get_nr_vecs(bdev));
+       bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+       if (!bio) {
+               printk("failed to allocate bio nr %d\n", nr);
+       }
+       bio_add_page(bio, page, size, offset);
+       bio->bi_end_io = end_io_func;
+       bio->bi_private = tree;
+       if (bio_ret) {
+               *bio_ret = bio;
+       } else {
+               ret = submit_one_bio(rw, bio);
+       }
+
+       return ret;
+}
+
 void set_page_extent_mapped(struct page *page)
 {
        if (!PagePrivate(page)) {
@@ -1590,7 +1633,8 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
                if (!ret) {
                        ret = submit_extent_page(READ, tree, page,
                                                 sector, iosize, page_offset,
-                                                bdev, end_bio_extent_readpage);
+                                                bdev, NULL, 1,
+                                                end_bio_extent_readpage);
                }
                if (ret)
                        SetPageError(page);
@@ -1613,11 +1657,12 @@ EXPORT_SYMBOL(extent_read_full_page);
  * are found, they are marked writeback.  Then the lock bits are removed
  * and the end_io handler clears the writeback ranges
  */
-int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
-                         get_extent_t *get_extent,
-                         struct writeback_control *wbc)
+static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+                             void *data)
 {
        struct inode *inode = page->mapping->host;
+       struct extent_page_data *epd = data;
+       struct extent_map_tree *tree = epd->tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
@@ -1691,7 +1736,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
                        clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
                        break;
                }
-               em = get_extent(inode, page, page_offset, cur, end, 1);
+               em = epd->get_extent(inode, page, page_offset, cur, end, 1);
                if (IS_ERR(em) || !em) {
                        SetPageError(page);
                        break;
@@ -1734,9 +1779,12 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
                if (ret)
                        SetPageError(page);
                else {
+                       unsigned long nr = end_index + 1;
                        set_range_writeback(tree, cur, cur + iosize - 1);
+
                        ret = submit_extent_page(WRITE, tree, page, sector,
                                                 iosize, page_offset, bdev,
+                                                &epd->bio, nr,
                                                 end_bio_extent_writepage);
                        if (ret)
                                SetPageError(page);
@@ -1750,8 +1798,44 @@ done:
        unlock_page(page);
        return 0;
 }
+
+int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
+                         get_extent_t *get_extent,
+                         struct writeback_control *wbc)
+{
+       int ret;
+       struct extent_page_data epd = {
+               .bio = NULL,
+               .tree = tree,
+               .get_extent = get_extent,
+       };
+
+       ret = __extent_writepage(page, wbc, &epd);
+       if (epd.bio)
+               submit_one_bio(WRITE, epd.bio);
+       return ret;
+}
 EXPORT_SYMBOL(extent_write_full_page);
 
+int extent_writepages(struct extent_map_tree *tree,
+                     struct address_space *mapping,
+                     get_extent_t *get_extent,
+                     struct writeback_control *wbc)
+{
+       int ret;
+       struct extent_page_data epd = {
+               .bio = NULL,
+               .tree = tree,
+               .get_extent = get_extent,
+       };
+
+       ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
+       if (epd.bio)
+               submit_one_bio(WRITE, epd.bio);
+       return ret;
+}
+EXPORT_SYMBOL(extent_writepages);
+
 /*
  * basic invalidatepage code, this waits on any locked or writeback
  * ranges corresponding to the page, and then deletes any extent state
@@ -1869,6 +1953,7 @@ int extent_prepare_write(struct extent_map_tree *tree,
                                       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
                        ret = submit_extent_page(READ, tree, page,
                                         sector, iosize, page_offset, em->bdev,
+                                        NULL, 1,
                                         end_bio_extent_preparewrite);
                        iocount++;
                        block_start = block_start + iosize;
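The reworked submit_extent_page() above is what makes the bio sharing possible: it keeps the open bio in *bio_ret across calls, extends it with bio_add_page() while the incoming sector continues where the bio ends, and submits it when the run breaks or the bio is full. A condensed, write-only sketch of that check follows; names are illustrative, error handling is simplified, and the real function additionally caps nr_vecs by the caller's max_pages hint and retries smaller allocations when called from a memory-reclaim context (PF_MEMALLOC).

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static int sketch_add_page_to_bio(struct block_device *bdev,
				  struct page *page, sector_t sector,
				  size_t len, unsigned long offset,
				  bio_end_io_t *end_io, void *private,
				  struct bio **bio_ret)
{
	struct bio *bio = *bio_ret;

	if (bio) {
		/* does this page continue where the open bio ends? */
		if (bio->bi_sector + (bio->bi_size >> 9) == sector &&
		    bio_add_page(bio, page, len, offset) == len)
			return 0;
		/* not contiguous or bio full: flush it and start over */
		submit_bio(WRITE, bio);
		*bio_ret = NULL;
	}

	bio = bio_alloc(GFP_NOFS, bio_get_nr_vecs(bdev));
	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = end_io;
	bio->bi_private = private;
	/* the first page always fits in a fresh bio, as the patch assumes */
	bio_add_page(bio, page, len, offset);

	*bio_ret = bio;
	return 0;
}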
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index b6f6519f9ba3d8b0bfc73c21112e0c6d99d0f973..0b0dcf19fe45f0f99bc827cf870dcf74dcb1ffa3 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -136,6 +136,10 @@ int extent_invalidatepage(struct extent_map_tree *tree,
 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
                          get_extent_t *get_extent,
                          struct writeback_control *wbc);
+int extent_writepages(struct extent_map_tree *tree,
+                     struct address_space *mapping,
+                     get_extent_t *get_extent,
+                     struct writeback_control *wbc);
 int extent_prepare_write(struct extent_map_tree *tree,
                         struct inode *inode, struct page *page,
                         unsigned from, unsigned to, get_extent_t *get_extent);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0c65141b99930425c94e90dc9c8e264b9909aa90..25f32d7c7ee8f5f8568f3e6097deba2e47e75f22 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1747,6 +1747,15 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
        return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
 }
 
+
+static int btrfs_writepages(struct address_space *mapping,
+                           struct writeback_control *wbc)
+{
+       struct extent_map_tree *tree;
+       tree = &BTRFS_I(mapping->host)->extent_tree;
+       return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
+}
+
 static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
        struct extent_map_tree *tree;
@@ -2526,6 +2535,7 @@ static struct extent_map_ops btrfs_extent_map_ops = {
 static struct address_space_operations btrfs_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
+       .writepages     = btrfs_writepages,
        .sync_page      = block_sync_page,
        .prepare_write  = btrfs_prepare_write,
        .commit_write   = btrfs_commit_write,
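Finally, inode.c wires the new path into btrfs_aops. The reason .writepages matters is visible in how the core VM dispatches writeback; roughly, and paraphrased from mm/page-writeback.c of this era rather than taken from the patch:

/* paraphrase of do_writepages(), the VM's per-mapping writeback entry point */
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		/* after this patch: btrfs_writepages() -> extent_writepages() */
		return mapping->a_ops->writepages(mapping, wbc);
	/* otherwise generic_writepages(): one ->writepage call per dirty page */
	return generic_writepages(mapping, wbc);
}

Without a .writepages hook, background writeback of a large streaming write therefore issues a bio per page; with this patch btrfs can build multi-page bios across the whole dirty range.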