#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
-#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
+#include <linux/falloc.h>
+#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
-#include "compat.h"
#include "tree-log.h"
+#include "ref-cache.h"
+#include "compression.h"
struct btrfs_iget_args {
u64 ino;
};
static void btrfs_truncate(struct inode *inode);
+static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
+static noinline int cow_file_range(struct inode *inode,
+ struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written, int unlock);
+/*
+ * a very lame attempt at stopping writes when the FS is 85% full. There
+ * are countless ways this is incorrect, but it is better than nothing.
+ */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
int for_del)
{
u64 total;
u64 used;
u64 thresh;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
+ spin_lock(&root->fs_info->delalloc_lock);
total = btrfs_super_total_bytes(&root->fs_info->super_copy);
used = btrfs_super_bytes_used(&root->fs_info->super_copy);
if (for_del)
	thresh = total * 90;
else
	thresh = total * 85;
do_div(thresh, 100);
if (used + root->fs_info->delalloc_bytes + num_required > thresh)
ret = -ENOSPC;
- spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+ spin_unlock(&root->fs_info->delalloc_lock);
return ret;
}
-static int cow_file_range(struct inode *inode, u64 start, u64 end)
+/*
+ * this does all the hard work for inserting an inline extent into
+ * the btree. The caller should have done a btrfs_drop_extents so that
+ * no overlapping inline items exist in the btree
+ */
+static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ u64 start, size_t size, size_t compressed_size,
+ struct page **compressed_pages)
+{
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct page *page = NULL;
+ char *kaddr;
+ unsigned long ptr;
+ struct btrfs_file_extent_item *ei;
+ int err = 0;
+ int ret;
+ size_t cur_size = size;
+ size_t datasize;
+ unsigned long offset;
+ int use_compress = 0;
+
+ if (compressed_size && compressed_pages) {
+ use_compress = 1;
+ cur_size = compressed_size;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ btrfs_set_trans_block_group(trans, inode);
+
+ key.objectid = inode->i_ino;
+ key.offset = start;
+ btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+ datasize = btrfs_file_extent_calc_inline_size(cur_size);
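+ /*
+ * datasize covers the file extent item header plus cur_size
+ * bytes of inline payload stored directly in the leaf
+ */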
+
+ inode_add_bytes(inode, size);
+ ret = btrfs_insert_empty_item(trans, root, path, &key,
+ datasize);
+ if (ret) {
+ err = ret;
+ goto fail;
+ }
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, ei, trans->transid);
+ btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
+ btrfs_set_file_extent_encryption(leaf, ei, 0);
+ btrfs_set_file_extent_other_encoding(leaf, ei, 0);
+ btrfs_set_file_extent_ram_bytes(leaf, ei, size);
+ ptr = btrfs_file_extent_inline_start(ei);
+
+ if (use_compress) {
+ struct page *cpage;
+ int i = 0;
+ while (compressed_size > 0) {
+ cpage = compressed_pages[i];
+ cur_size = min_t(unsigned long, compressed_size,
+ PAGE_CACHE_SIZE);
+
+ kaddr = kmap(cpage);
+ write_extent_buffer(leaf, kaddr, ptr, cur_size);
+ kunmap(cpage);
+
+ i++;
+ ptr += cur_size;
+ compressed_size -= cur_size;
+ }
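+ /*
+ * ram_bytes above stays at the uncompressed size; only the
+ * inline payload we just copied holds the compressed bytes
+ */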
+ btrfs_set_file_extent_compression(leaf, ei,
+ BTRFS_COMPRESS_ZLIB);
+ } else {
+ page = find_get_page(inode->i_mapping,
+ start >> PAGE_CACHE_SHIFT);
+ btrfs_set_file_extent_compression(leaf, ei, 0);
+ kaddr = kmap_atomic(page, KM_USER0);
+ offset = start & (PAGE_CACHE_SIZE - 1);
+ write_extent_buffer(leaf, kaddr + offset, ptr, size);
+ kunmap_atomic(kaddr, KM_USER0);
+ page_cache_release(page);
+ }
+ btrfs_mark_buffer_dirty(leaf);
+ btrfs_free_path(path);
+
+ BTRFS_I(inode)->disk_i_size = inode->i_size;
+ btrfs_update_inode(trans, root, inode);
+ return 0;
+fail:
+ btrfs_free_path(path);
+ return err;
+}
+
+
+/*
+ * conditionally insert an inline extent into the file. This
+ * does the checks required to make sure the data is small enough
+ * to fit as an inline extent.
+ */
+static int cow_file_range_inline(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode, u64 start, u64 end,
+ size_t compressed_size,
+ struct page **compressed_pages)
+{
+ u64 isize = i_size_read(inode);
+ u64 actual_end = min(end + 1, isize);
+ u64 inline_len = actual_end - start;
+ u64 aligned_end = (end + root->sectorsize - 1) &
+ ~((u64)root->sectorsize - 1);
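+ /* e.g. with a 4K sectorsize, end = 5000 rounds aligned_end up to 8192 */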
+ u64 hint_byte;
+ u64 data_len = inline_len;
+ int ret;
+
+ if (compressed_size)
+ data_len = compressed_size;
+
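+ /*
+ * to qualify for an inline extent the data must start at file
+ * offset zero, fit inside a single page and under max_inline,
+ * and must reach all the way to the end of the file
+ */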
+ if (start > 0 ||
+ actual_end >= PAGE_CACHE_SIZE ||
+ data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
+ (!compressed_size &&
+ (actual_end & (root->sectorsize - 1)) == 0) ||
+ end + 1 < isize ||
+ data_len > root->fs_info->max_inline) {
+ return 1;
+ }
+
+ ret = btrfs_drop_extents(trans, root, inode, start,
+ aligned_end, start, &hint_byte);
+ BUG_ON(ret);
+
+ if (isize > actual_end)
+ inline_len = min_t(u64, isize, actual_end);
+ ret = insert_inline_extent(trans, root, inode, start,
+ inline_len, compressed_size,
+ compressed_pages);
+ BUG_ON(ret);
+ btrfs_drop_extent_cache(inode, start, aligned_end, 0);
+ return 0;
+}
+
+struct async_extent {
+ u64 start;
+ u64 ram_size;
+ u64 compressed_size;
+ struct page **pages;
+ unsigned long nr_pages;
+ struct list_head list;
+};
+
+struct async_cow {
+ struct inode *inode;
+ struct btrfs_root *root;
+ struct page *locked_page;
+ u64 start;
+ u64 end;
+ struct list_head extents;
+ struct btrfs_work work;
+};
+
+static noinline int add_async_extent(struct async_cow *cow,
+ u64 start, u64 ram_size,
+ u64 compressed_size,
+ struct page **pages,
+ unsigned long nr_pages)
+{
+ struct async_extent *async_extent;
+
+ async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
+ BUG_ON(!async_extent);
+ async_extent->start = start;
+ async_extent->ram_size = ram_size;
+ async_extent->compressed_size = compressed_size;
+ async_extent->pages = pages;
+ async_extent->nr_pages = nr_pages;
+ list_add_tail(&async_extent->list, &cow->extents);
+ return 0;
+}
+
+/*
+ * we create compressed extents in two phases. The first
+ * phase compresses a range of pages that have already been
+ * locked (both pages and state bits are locked).
+ *
+ * This is done inside an ordered work queue, and the compression
+ * is spread across many cpus. The actual IO submission is step
+ * two, and the ordered work queue takes care of making sure that
+ * happens in the same order things were put onto the queue by
+ * writepages and friends.
+ *
+ * If this code finds it can't get good compression, it puts an
+ * entry onto the work queue to write the uncompressed bytes. This
+ * makes sure that both compressed inodes and uncompressed inodes
+ * are written in the same order that pdflush sent them down.
+ */
+static noinline int compress_file_range(struct inode *inode,
+ struct page *locked_page,
+ u64 start, u64 end,
+ struct async_cow *async_cow,
+ int *num_added)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
+ u64 num_bytes;
+ u64 orig_start;
+ u64 disk_num_bytes;
+ u64 blocksize = root->sectorsize;
+ u64 actual_end;
+ u64 isize = i_size_read(inode);
+ int ret = 0;
+ struct page **pages = NULL;
+ unsigned long nr_pages;
+ unsigned long nr_pages_ret = 0;
+ unsigned long total_compressed = 0;
+ unsigned long total_in = 0;
+ unsigned long max_compressed = 128 * 1024;
+ unsigned long max_uncompressed = 128 * 1024;
+ int i;
+ int will_compress;
+
+ orig_start = start;
+
+ actual_end = min_t(u64, isize, end + 1);
+again:
+ will_compress = 0;
+ nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
+ nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
+
+ total_compressed = actual_end - start;
+
+ /* we want to make sure the amount of ram required to uncompress
+ * an extent is reasonable, so we limit the total size in ram
+ * of a compressed extent to 128k. This is a crucial number
+ * because it also controls how easily we can spread reads across
+ * cpus for decompression.
+ *
+ * We also want to make sure the amount of IO required to do
+ * a random read is reasonably small, so we limit the size of
+ * a compressed extent to 128k.
+ */
+ total_compressed = min(total_compressed, max_uncompressed);
+ num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+ num_bytes = max(blocksize, num_bytes);
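+ /*
+ * end is inclusive, so the rounding above turns e.g. start 0,
+ * end 5000 into 8192 bytes with a 4K blocksize
+ */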
+ disk_num_bytes = num_bytes;
+ total_in = 0;
+ ret = 0;
+
+ /*
+ * we do compression for mount -o compress and when the
+ * inode has not been flagged as nocompress. This flag can
+ * change at any time if we discover bad compression ratios.
+ */
+ if (!btrfs_test_flag(inode, NOCOMPRESS) &&
+ btrfs_test_opt(root, COMPRESS)) {
+ WARN_ON(pages);
+ pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+
+ ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
+ total_compressed, pages,
+ nr_pages, &nr_pages_ret,
+ &total_in,
+ &total_compressed,
+ max_compressed);
+
+ if (!ret) {
+ unsigned long offset = total_compressed &
+ (PAGE_CACHE_SIZE - 1);
+ struct page *page = pages[nr_pages_ret - 1];
+ char *kaddr;
+
+ /* zero the tail end of the last page, we might be
+ * sending it down to disk
+ */
+ if (offset) {
+ kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr + offset, 0,
+ PAGE_CACHE_SIZE - offset);
+ kunmap_atomic(kaddr, KM_USER0);
+ }
+ will_compress = 1;
+ }
+ }
+ if (start == 0) {
+ trans = btrfs_join_transaction(root, 1);
+ BUG_ON(!trans);
+ btrfs_set_trans_block_group(trans, inode);
+
+ /* lets try to make an inline extent */
+ if (ret || total_in < (actual_end - start)) {
+ /* we didn't compress the entire range, try
+ * to make an uncompressed inline extent.
+ */
+ ret = cow_file_range_inline(trans, root, inode,
+ start, end, 0, NULL);
+ } else {
+ /* try making a compressed inline extent */
+ ret = cow_file_range_inline(trans, root, inode,
+ start, end,
+ total_compressed, pages);
+ }
+ btrfs_end_transaction(trans, root);
+ if (ret == 0) {
+ /*
+ * inline extent creation worked, we don't need
+ * to create any more async work items. Unlock
+ * and free up our temp pages.
+ */
+ extent_clear_unlock_delalloc(inode,
+ &BTRFS_I(inode)->io_tree,
+ start, end, NULL, 1, 0,
+ 0, 1, 1, 1);
+ ret = 0;
+ goto free_pages_out;
+ }
+ }
+
+ if (will_compress) {
+ /*
+ * we aren't doing an inline extent, so round the compressed size
+ * up to a block size boundary so the allocator does sane
+ * things
+ */
+ total_compressed = (total_compressed + blocksize - 1) &
+ ~(blocksize - 1);
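+ /* e.g. 5000 compressed bytes become 8192 on disk with 4K blocks */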
+
+ /*
+ * one last check to make sure the compression is really a
+ * win, compare the page count read with the blocks on disk
+ */
+ total_in = (total_in + PAGE_CACHE_SIZE - 1) &
+ ~(PAGE_CACHE_SIZE - 1);
+ if (total_compressed >= total_in) {
+ will_compress = 0;
+ } else {
+ disk_num_bytes = total_compressed;
+ num_bytes = total_in;
+ }
+ }
+ if (!will_compress && pages) {
+ /*
+ * the compression code ran but failed to make things smaller,
+ * free any pages it allocated and our page pointer array
+ */
+ for (i = 0; i < nr_pages_ret; i++) {
+ WARN_ON(pages[i]->mapping);
+ page_cache_release(pages[i]);
+ }
+ kfree(pages);
+ pages = NULL;
+ total_compressed = 0;
+ nr_pages_ret = 0;
+
+ /* flag the file so we don't compress in the future */
+ btrfs_set_flag(inode, NOCOMPRESS);
+ }
+ if (will_compress) {
+ *num_added += 1;
+
+ /* the async work queues will take care of doing actual
+ * allocation on disk for these compressed pages,
+ * and will submit them to the elevator.
+ */
+ add_async_extent(async_cow, start, num_bytes,
+ total_compressed, pages, nr_pages_ret);
+
+ if (start + num_bytes < end && start + num_bytes < actual_end) {
+ start += num_bytes;
+ pages = NULL;
+ cond_resched();
+ goto again;
+ }
+ } else {
+ /*
+ * No compression, but we still need to write the pages in
+ * the file we've been given so far. redirty the locked
+ * page if it corresponds to our extent and set things up
+ * for the async work queue to run cow_file_range to do
+ * the normal delalloc dance
+ */
+ if (page_offset(locked_page) >= start &&
+ page_offset(locked_page) <= end) {
+ __set_page_dirty_nobuffers(locked_page);
+ /* unlocked later on in the async handlers */
+ }
+ add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
+ *num_added += 1;
+ }
+
+out:
+ return 0;
+
+free_pages_out:
+ for (i = 0; i < nr_pages_ret; i++) {
+ WARN_ON(pages[i]->mapping);
+ page_cache_release(pages[i]);
+ }
+ kfree(pages);
+
+ goto out;
+}
+
+/*
+ * phase two of compressed writeback. This is the ordered portion
+ * of the code, which only gets called in the order the work was
+ * queued. We walk all the async extents created by compress_file_range
+ * and send them down to the disk.
+ */
+static noinline int submit_compressed_extents(struct inode *inode,
+ struct async_cow *async_cow)
+{
+ struct async_extent *async_extent;
+ u64 alloc_hint = 0;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_key ins;
+ struct extent_map *em;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_io_tree *io_tree;
+ int ret;
+
+ if (list_empty(&async_cow->extents))
+ return 0;
+
+ trans = btrfs_join_transaction(root, 1);
+
+ while (!list_empty(&async_cow->extents)) {
+ async_extent = list_entry(async_cow->extents.next,
+ struct async_extent, list);
+ list_del(&async_extent->list);
+
+ io_tree = &BTRFS_I(inode)->io_tree;
+
+ /* did the compression code fall back to uncompressed IO? */
+ if (!async_extent->pages) {
+ int page_started = 0;
+ unsigned long nr_written = 0;
+
+ lock_extent(io_tree, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, GFP_NOFS);
+
+ /* allocate blocks */
+ cow_file_range(inode, async_cow->locked_page,
+ async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ &page_started, &nr_written, 0);
+
+ /*
+ * if page_started, cow_file_range inserted an
+ * inline extent and took care of all the unlocking
+ * and IO for us. Otherwise, we need to submit
+ * all those pages down to the drive.
+ */
+ if (!page_started)
+ extent_write_locked_range(io_tree,
+ inode, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ btrfs_get_extent,
+ WB_SYNC_ALL);
+ kfree(async_extent);
+ cond_resched();
+ continue;
+ }
+
+ lock_extent(io_tree, async_extent->start,
+ async_extent->start + async_extent->ram_size - 1,
+ GFP_NOFS);
+ /*
+ * here we're doing allocation and writeback of the
+ * compressed pages
+ */
+ btrfs_drop_extent_cache(inode, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, 0);
+
+ ret = btrfs_reserve_extent(trans, root,
+ async_extent->compressed_size,
+ async_extent->compressed_size,
+ 0, alloc_hint,
+ (u64)-1, &ins, 1);
+ BUG_ON(ret);
+ em = alloc_extent_map(GFP_NOFS);
+ em->start = async_extent->start;
+ em->len = async_extent->ram_size;
+ em->orig_start = em->start;
+
+ em->block_start = ins.objectid;
+ em->block_len = ins.offset;
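+ /* em->len is the uncompressed size; block_len is the bytes on disk */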
+ em->bdev = root->fs_info->fs_devices->latest_bdev;
+ set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+
+ while (1) {
+ spin_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em);
+ spin_unlock(&em_tree->lock);
+ if (ret != -EEXIST) {
+ free_extent_map(em);
+ break;
+ }
+ btrfs_drop_extent_cache(inode, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, 0);
+ }
+
+ ret = btrfs_add_ordered_extent(inode, async_extent->start,
+ ins.objectid,
+ async_extent->ram_size,
+ ins.offset,
+ BTRFS_ORDERED_COMPRESSED);
+ BUG_ON(ret);
+
+ btrfs_end_transaction(trans, root);
+
+ /*
+ * clear dirty, set writeback and unlock the pages.
+ */
+ extent_clear_unlock_delalloc(inode,
+ &BTRFS_I(inode)->io_tree,
+ async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ NULL, 1, 1, 0, 1, 1, 0);
+
+ ret = btrfs_submit_compressed_write(inode,
+ async_extent->start,
+ async_extent->ram_size,
+ ins.objectid,
+ ins.offset, async_extent->pages,
+ async_extent->nr_pages);
+
+ BUG_ON(ret);
+ trans = btrfs_join_transaction(root, 1);
+ alloc_hint = ins.objectid + ins.offset;
+ kfree(async_extent);
+ cond_resched();
+ }
+
+ btrfs_end_transaction(trans, root);
+ return 0;
+}
+
+/*
+ * when extent_io.c finds a delayed allocation range in the file,
+ * the callbacks end up in this code. The basic idea is to
+ * allocate extents on disk for the range, and create ordered data structs
+ * in ram to track those extents.
+ *
+ * locked_page is the page that writepage had locked already. We use
+ * it to make sure we don't do extra locks or unlocks.
+ *
+ * *page_started is set to one if we unlock locked_page and do everything
+ * required to start IO on it. It may be clean and already done with
+ * IO when we return.
+ */
+static noinline int cow_file_range(struct inode *inode,
+ struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written,
+ int unlock)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 alloc_hint = 0;
u64 num_bytes;
+ unsigned long ram_size;
+ u64 disk_num_bytes;
u64 cur_alloc_size;
u64 blocksize = root->sectorsize;
- u64 orig_num_bytes;
+ u64 actual_end;
+ u64 isize = i_size_read(inode);
struct btrfs_key ins;
struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
btrfs_set_trans_block_group(trans, inode);
+ actual_end = min_t(u64, isize, end + 1);
+
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
- orig_num_bytes = num_bytes;
+ disk_num_bytes = num_bytes;
+ ret = 0;
- if (alloc_hint == EXTENT_MAP_INLINE)
- goto out;
+ if (start == 0) {
+ /* lets try to make an inline extent */
+ ret = cow_file_range_inline(trans, root, inode,
+ start, end, 0, NULL);
+ if (ret == 0) {
+ extent_clear_unlock_delalloc(inode,
+ &BTRFS_I(inode)->io_tree,
+ start, end, NULL, 1, 1,
+ 1, 1, 1, 1);
+ *nr_written = *nr_written +
+ (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
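+ /* end is inclusive, so this counts every page in [start, end] */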
+ *page_started = 1;
+ ret = 0;
+ goto out;
+ }
+ }
- BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
- mutex_lock(&BTRFS_I(inode)->extent_mutex);
- btrfs_drop_extent_cache(inode, start, start + num_bytes - 1);
- mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+ BUG_ON(disk_num_bytes >
+ btrfs_super_total_bytes(&root->fs_info->super_copy));
+
+ btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
- while(num_bytes > 0) {
- cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
+ while (disk_num_bytes > 0) {
+ cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
- root->sectorsize, 0, 0,
+ root->sectorsize, 0, alloc_hint,
(u64)-1, &ins, 1);
- if (ret) {
- WARN_ON(1);
- goto out;
- }
+ BUG_ON(ret);
+
em = alloc_extent_map(GFP_NOFS);
em->start = start;
+ em->orig_start = em->start;
+
+ ram_size = ins.offset;
em->len = ins.offset;
+
em->block_start = ins.objectid;
+ em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
- mutex_lock(&BTRFS_I(inode)->extent_mutex);
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- while(1) {
+
+ while (1) {
spin_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
spin_unlock(&em_tree->lock);
break;
}
btrfs_drop_extent_cache(inode, start,
- start + ins.offset - 1);
+ start + ram_size - 1, 0);
}
- mutex_unlock(&BTRFS_I(inode)->extent_mutex);
cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
- ins.offset, 0);
+ ram_size, cur_alloc_size, 0);
BUG_ON(ret);
- if (num_bytes < cur_alloc_size) {
- printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
- cur_alloc_size);
- break;
+
+ if (root->root_key.objectid ==
+ BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ ret = btrfs_reloc_clone_csums(inode, start,
+ cur_alloc_size);
+ BUG_ON(ret);
}
+
+ if (disk_num_bytes < cur_alloc_size)
+ break;
+
+ /* we're not doing compressed IO, don't unlock the first
+ * page (which the caller expects to stay locked), don't
+ * clear any dirty bits and don't set any writeback bits
+ */
+ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+ start, start + ram_size - 1,
+ locked_page, unlock, 1,
+ 1, 0, 0, 0);
+ disk_num_bytes -= cur_alloc_size;
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
}
out:
+ ret = 0;
btrfs_end_transaction(trans, root);
+
return ret;
}
-static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
+/*
+ * work queue callback to start compression on a file and its pages
+ */
+static noinline void async_cow_start(struct btrfs_work *work)
+{
+ struct async_cow *async_cow;
+ int num_added = 0;
+ async_cow = container_of(work, struct async_cow, work);
+
+ compress_file_range(async_cow->inode, async_cow->locked_page,
+ async_cow->start, async_cow->end, async_cow,
+ &num_added);
+ if (num_added == 0)
+ async_cow->inode = NULL;
+}
+
+/*
+ * work queue callback to submit previously compressed pages
+ */
+static noinline void async_cow_submit(struct btrfs_work *work)
+{
+ struct async_cow *async_cow;
+ struct btrfs_root *root;
+ unsigned long nr_pages;
+
+ async_cow = container_of(work, struct async_cow, work);
+
+ root = async_cow->root;
+ nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
+
+ atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
+
+ if (atomic_read(&root->fs_info->async_delalloc_pages) <
+ 5 * 1024 * 1024 &&
+ waitqueue_active(&root->fs_info->async_submit_wait))
+ wake_up(&root->fs_info->async_submit_wait);
+
+ if (async_cow->inode)
+ submit_compressed_extents(async_cow->inode, async_cow);
+}
+
+static noinline void async_cow_free(struct btrfs_work *work)
+{
+ struct async_cow *async_cow;
+ async_cow = container_of(work, struct async_cow, work);
+ kfree(async_cow);
+}
+
+static int cow_file_range_async(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written)
+{
+ struct async_cow *async_cow;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ unsigned long nr_pages;
+ u64 cur_end;
+ int limit = 10 * 1024 * 1024;
+
+ if (!btrfs_test_opt(root, COMPRESS)) {
+ return cow_file_range(inode, locked_page, start, end,
+ page_started, nr_written, 1);
+ }
+
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
+ EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+ while (start < end) {
+ async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
+ BUG_ON(!async_cow);
+ async_cow->inode = inode;
+ async_cow->root = root;
+ async_cow->locked_page = locked_page;
+ async_cow->start = start;
+
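+ /*
+ * carve the range into 512K chunks so compression spreads
+ * across workers; NOCOMPRESS inodes go down in one chunk
+ */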
+ if (btrfs_test_flag(inode, NOCOMPRESS))
+ cur_end = end;
+ else
+ cur_end = min(end, start + 512 * 1024 - 1);
+
+ async_cow->end = cur_end;
+ INIT_LIST_HEAD(&async_cow->extents);
+
+ async_cow->work.func = async_cow_start;
+ async_cow->work.ordered_func = async_cow_submit;
+ async_cow->work.ordered_free = async_cow_free;
+ async_cow->work.flags = 0;
+
+ nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
+ atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
+
+ btrfs_queue_worker(&root->fs_info->delalloc_workers,
+ &async_cow->work);
+
+ if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
+ wait_event(root->fs_info->async_submit_wait,
+ (atomic_read(&root->fs_info->async_delalloc_pages) <
+ limit));
+ }
+
+ while (atomic_read(&root->fs_info->async_submit_draining) &&
+ atomic_read(&root->fs_info->async_delalloc_pages)) {
+ wait_event(root->fs_info->async_submit_wait,
+ (atomic_read(&root->fs_info->async_delalloc_pages) ==
+ 0));
+ }
+
+ *nr_written += nr_pages;
+ start = cur_end + 1;
+ }
+ *page_started = 1;
+ return 0;
+}
+
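+/*
+ * helper: returns 0 only when no checksum items exist anywhere in
+ * [bytenr, bytenr + num_bytes); anything else makes the caller cow
+ */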
+static noinline int csum_exist_in_range(struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes)
+{
+ int ret;
+ struct btrfs_ordered_sum *sums;
+ LIST_HEAD(list);
+
+ ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
+ bytenr + num_bytes - 1, &list);
+ if (ret == 0 && list_empty(&list))
+ return 0;
+
+ while (!list_empty(&list)) {
+ sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+ return 1;
+}
+
+/*
+ * the nocow writeback callback. This checks for snapshots or COW copies
+ * of the extents that exist in the file, and COWs the file as required.
+ *
+ * If no cow copies or snapshots exist, we write directly to the existing
+ * blocks on disk
+ */
+static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, int *page_started, int force,
+ unsigned long *nr_written)
{
- u64 extent_start;
- u64 extent_end;
- u64 bytenr;
- u64 loops = 0;
- u64 total_fs_bytes;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_group_cache *block_group;
struct btrfs_trans_handle *trans;
struct extent_buffer *leaf;
- int found_type;
struct btrfs_path *path;
- struct btrfs_file_extent_item *item;
- int ret;
- int err = 0;
+ struct btrfs_file_extent_item *fi;
struct btrfs_key found_key;
+ u64 cow_start;
+ u64 cur_offset;
+ u64 extent_end;
+ u64 disk_bytenr;
+ u64 num_bytes;
+ int extent_type;
+ int ret;
+ int type;
+ int nocow;
+ int check_prev = 1;
- total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
path = btrfs_alloc_path();
BUG_ON(!path);
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
-again:
- ret = btrfs_lookup_file_extent(NULL, root, path,
- inode->i_ino, start, 0);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- if (ret != 0) {
- if (path->slots[0] == 0)
- goto not_found;
- path->slots[0]--;
- }
+ cow_start = (u64)-1;
+ cur_offset = start;
+ while (1) {
+ ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ cur_offset, 0);
+ BUG_ON(ret < 0);
+ if (ret > 0 && path->slots[0] > 0 && check_prev) {
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &found_key,
+ path->slots[0] - 1);
+ if (found_key.objectid == inode->i_ino &&
+ found_key.type == BTRFS_EXTENT_DATA_KEY)
+ path->slots[0]--;
+ }
+ check_prev = 0;
+next_slot:
+ leaf = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ BUG_ON(ret < 0);
+ if (ret > 0)
+ break;
+ leaf = path->nodes[0];
+ }
- leaf = path->nodes[0];
- item = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
+ nocow = 0;
+ disk_bytenr = 0;
+ num_bytes = 0;
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- /* are we inside the extent that was found? */
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- found_type = btrfs_key_type(&found_key);
- if (found_key.objectid != inode->i_ino ||
- found_type != BTRFS_EXTENT_DATA_KEY)
- goto not_found;
+ if (found_key.objectid > inode->i_ino ||
+ found_key.type > BTRFS_EXTENT_DATA_KEY ||
+ found_key.offset > end)
+ break;
- found_type = btrfs_file_extent_type(leaf, item);
- extent_start = found_key.offset;
- if (found_type == BTRFS_FILE_EXTENT_REG) {
- u64 extent_num_bytes;
+ if (found_key.offset > cur_offset) {
+ extent_end = found_key.offset;
+ goto out_check;
+ }
- extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
- extent_end = extent_start + extent_num_bytes;
- err = 0;
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(leaf, fi);
- if (loops && start != extent_start)
- goto not_found;
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+ extent_end = found_key.offset +
+ btrfs_file_extent_num_bytes(leaf, fi);
+ if (extent_end <= start) {
+ path->slots[0]++;
+ goto next_slot;
+ }
+ if (disk_bytenr == 0)
+ goto out_check;
+ if (btrfs_file_extent_compression(leaf, fi) ||
+ btrfs_file_extent_encryption(leaf, fi) ||
+ btrfs_file_extent_other_encoding(leaf, fi))
+ goto out_check;
+ if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
+ goto out_check;
+ if (btrfs_extent_readonly(root, disk_bytenr))
+ goto out_check;
+ if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+ disk_bytenr))
+ goto out_check;
+ disk_bytenr += btrfs_file_extent_offset(leaf, fi);
+ disk_bytenr += cur_offset - found_key.offset;
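+ /*
+ * disk_bytenr now points at the physical start of cur_offset
+ * inside the existing extent
+ */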
+ num_bytes = min(end + 1, extent_end) - cur_offset;
+ /*
+ * force cow if csum exists in the range.
+ * this ensures that the csums for a given extent are
+ * either valid or do not exist.
+ */
+ if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+ goto out_check;
+ nocow = 1;
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ extent_end = found_key.offset +
+ btrfs_file_extent_inline_len(leaf, fi);
+ extent_end = ALIGN(extent_end, root->sectorsize);
+ } else {
+ BUG_ON(1);
+ }
+out_check:
+ if (extent_end <= start) {
+ path->slots[0]++;
+ goto next_slot;
+ }
+ if (!nocow) {
+ if (cow_start == (u64)-1)
+ cow_start = cur_offset;
+ cur_offset = extent_end;
+ if (cur_offset > end)
+ break;
+ path->slots[0]++;
+ goto next_slot;
+ }
- if (start < extent_start || start >= extent_end)
- goto not_found;
+ btrfs_release_path(root, path);
+ if (cow_start != (u64)-1) {
+ ret = cow_file_range(inode, locked_page, cow_start,
+ found_key.offset - 1, page_started,
+ nr_written, 1);
+ BUG_ON(ret);
+ cow_start = (u64)-1;
+ }
- bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
- if (bytenr == 0)
- goto not_found;
+ if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ struct extent_map *em;
+ struct extent_map_tree *em_tree;
+ em_tree = &BTRFS_I(inode)->extent_tree;
+ em = alloc_extent_map(GFP_NOFS);
+ em->start = cur_offset;
+ em->orig_start = em->start;
+ em->len = num_bytes;
+ em->block_len = num_bytes;
+ em->block_start = disk_bytenr;
+ em->bdev = root->fs_info->fs_devices->latest_bdev;
+ set_bit(EXTENT_FLAG_PINNED, &em->flags);
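+ /*
+ * -EEXIST below means a stale cached extent overlaps this
+ * range; drop it and retry until our mapping goes in
+ */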
+ while (1) {
+ spin_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em);
+ spin_unlock(&em_tree->lock);
+ if (ret != -EEXIST) {
+ free_extent_map(em);
+ break;
+ }
+ btrfs_drop_extent_cache(inode, em->start,
+ em->start + em->len - 1, 0);
+ }
+ type = BTRFS_ORDERED_PREALLOC;
+ } else {
+ type = BTRFS_ORDERED_NOCOW;
+ }
- if (btrfs_cross_ref_exists(trans, root, &found_key, bytenr))
- goto not_found;
- /*
- * we may be called by the resizer, make sure we're inside
- * the limits of the FS
- */
- block_group = btrfs_lookup_block_group(root->fs_info,
- bytenr);
- if (!block_group || block_group->ro)
- goto not_found;
+ ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
+ num_bytes, num_bytes, type);
+ BUG_ON(ret);
- bytenr += btrfs_file_extent_offset(leaf, item);
- extent_num_bytes = min(end + 1, extent_end) - start;
- ret = btrfs_add_ordered_extent(inode, start, bytenr,
- extent_num_bytes, 1);
- if (ret) {
- err = ret;
- goto out;
- }
+ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+ cur_offset, cur_offset + num_bytes - 1,
+ locked_page, 1, 1, 1, 0, 0, 0);
+ cur_offset = extent_end;
+ if (cur_offset > end)
+ break;
+ }
+ btrfs_release_path(root, path);
- btrfs_release_path(root, path);
- start = extent_end;
- if (start <= end) {
- loops++;
- goto again;
- }
- } else {
-not_found:
- btrfs_end_transaction(trans, root);
- btrfs_free_path(path);
- return cow_file_range(inode, start, end);
+ if (cur_offset <= end && cow_start == (u64)-1)
+ cow_start = cur_offset;
+ if (cow_start != (u64)-1) {
+ ret = cow_file_range(inode, locked_page, cow_start, end,
+ page_started, nr_written, 1);
+ BUG_ON(ret);
}
-out:
- WARN_ON(err);
- btrfs_end_transaction(trans, root);
+
+ ret = btrfs_end_transaction(trans, root);
+ BUG_ON(ret);
btrfs_free_path(path);
- return err;
+ return 0;
}
-static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
+/*
+ * extent_io.c callback to do delayed allocation processing
+ */
+static int run_delalloc_range(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
- if (btrfs_test_opt(root, NODATACOW) ||
- btrfs_test_flag(inode, NODATACOW))
- ret = run_delalloc_nocow(inode, start, end);
+ if (btrfs_test_flag(inode, NODATACOW))
+ ret = run_delalloc_nocow(inode, locked_page, start, end,
+ page_started, 1, nr_written);
+ else if (btrfs_test_flag(inode, PREALLOC))
+ ret = run_delalloc_nocow(inode, locked_page, start, end,
+ page_started, 0, nr_written);
else
- ret = cow_file_range(inode, start, end);
+ ret = cow_file_range_async(inode, locked_page, start, end,
+ page_started, nr_written);
return ret;
}
-int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
+/*
+ * extent_io.c set_bit_hook, used to track delayed allocation
+ * bytes in this file, and to maintain the list of inodes that
+ * have pending delalloc work to be done.
+ */
+static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits)
{
- unsigned long flags;
+ /*
+ * set_bit and clear_bit hooks normally require _irqsave/restore
+ * but in this case, we are only testing for the DELALLOC
+ * bit, which is only set or cleared with irqs on
+ */
if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
- spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
+ spin_lock(&root->fs_info->delalloc_lock);
BTRFS_I(inode)->delalloc_bytes += end - start + 1;
root->fs_info->delalloc_bytes += end - start + 1;
if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->fs_info->delalloc_inodes);
}
- spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+ spin_unlock(&root->fs_info->delalloc_lock);
}
return 0;
}
-int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
+/*
+ * extent_io.c clear_bit_hook, see set_bit_hook for why
+ */
+static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits)
{
+ /*
+ * set_bit and clear_bit hooks normally require _irqsave/restore
+ * but in this case, we are only testing for the DELALLOC
+ * bit, which is only set or cleared with irqs on
+ */
if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
- unsigned long flags;
- spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
+ spin_lock(&root->fs_info->delalloc_lock);
if (end - start + 1 > root->fs_info->delalloc_bytes) {
- printk("warning: delalloc account %Lu %Lu\n",
- end - start + 1, root->fs_info->delalloc_bytes);
+ printk(KERN_INFO "btrfs warning: delalloc account "
+ "%llu %llu\n",
+ (unsigned long long)(end - start + 1),
+ (unsigned long long)
+ root->fs_info->delalloc_bytes);
root->fs_info->delalloc_bytes = 0;
BTRFS_I(inode)->delalloc_bytes = 0;
} else {
	root->fs_info->delalloc_bytes -= end - start + 1;
	BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
}
if (BTRFS_I(inode)->delalloc_bytes == 0 &&
    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
}
- spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+ spin_unlock(&root->fs_info->delalloc_lock);
}
return 0;
}
+/*
+ * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
+ * we don't create bios that span stripes or chunks
+ */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
- size_t size, struct bio *bio)
+ size_t size, struct bio *bio,
+ unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
struct btrfs_mapping_tree *map_tree;
- u64 logical = bio->bi_sector << 9;
+ u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
+ if (bio_flags & EXTENT_BIO_COMPRESSED)
+ return 0;
+
length = bio->bi_size;
map_tree = &root->fs_info->mapping_tree;
map_length = length;
ret = btrfs_map_block(map_tree, READ, logical,
&map_length, NULL, 0);
- if (map_length < length + size) {
+ if (map_length < length + size)
return 1;
- }
return 0;
}
-int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
- int mirror_num)
+/*
+ * in order to insert checksums into the metadata in large chunks,
+ * we wait until bio submission time. All the pages in the bio are
+ * checksummed and sums are attached onto the ordered extent record.
+ *
+ * At IO completion time the csums attached on the ordered extent record
+ * are inserted into the btree
+ */
+static int __btrfs_submit_bio_start(struct inode *inode, int rw,
+ struct bio *bio, int mirror_num,
+ unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
- ret = btrfs_csum_one_bio(root, inode, bio);
+ ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
BUG_ON(ret);
+ return 0;
+}
+/*
+ * the second half of the async checksumming above: by the time this
+ * runs the csums have been attached to the ordered extent record,
+ * so all that is left is to map and submit the bio
+ */
+static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+ int mirror_num, unsigned long bio_flags)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
-int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
- int mirror_num)
+/*
+ * extent_io.c submission hook. This does the right thing for csum calculation
+ * on write, or reading the csums from the tree before a read
+ */
+static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+ int mirror_num, unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
+ int skip_sum;
+
+ skip_sum = btrfs_test_flag(inode, NODATASUM);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
- if (btrfs_test_opt(root, NODATASUM) ||
- btrfs_test_flag(inode, NODATASUM)) {
- goto mapit;
- }
-
if (!(rw & (1 << BIO_RW))) {
- btrfs_lookup_bio_sums(root, inode, bio);
+ if (bio_flags & EXTENT_BIO_COMPRESSED) {
+ return btrfs_submit_compressed_read(inode, bio,
+ mirror_num, bio_flags);
+ } else if (!skip_sum)
+ btrfs_lookup_bio_sums(root, inode, bio, NULL);
goto mapit;
- }
- return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+ } else if (!skip_sum) {
+ /* csum items have already been cloned */
+ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+ goto mapit;
+ /* we're doing a write, do the async checksumming */
+ return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num,
- __btrfs_submit_bio_hook);
+ bio_flags, __btrfs_submit_bio_start,
+ __btrfs_submit_bio_done);
+ }
+
mapit:
return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
+/*
+ * given a list of ordered sums, record them in the inode. This happens
+ * at IO completion time based on sums calculated at bio submission time.
+ */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_offset,
struct list_head *list)
{
- struct list_head *cur;
struct btrfs_ordered_sum *sum;
btrfs_set_trans_block_group(trans, inode);
- list_for_each(cur, list) {
- sum = list_entry(cur, struct btrfs_ordered_sum, list);
- btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
- inode, sum);
+
+ list_for_each_entry(sum, list, list) {
+ btrfs_csum_file_blocks(trans,
+ BTRFS_I(inode)->root->fs_info->csum_root, sum);
}
return 0;
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
+ WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
GFP_NOFS);
}
+/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
struct page *page;
struct btrfs_work work;
};
-/* see btrfs_writepage_start_hook for details on why this is required */
-void btrfs_writepage_fixup_worker(struct btrfs_work *work)
+static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
struct btrfs_writepage_fixup *fixup;
struct btrfs_ordered_extent *ordered;
* good idea. This causes problems because we want to make sure COW
* properly happens and the data=ordered rules are followed.
*
- * In our case any range that doesn't have the EXTENT_ORDERED bit set
+ * In our case any range that doesn't have the ORDERED bit set
* hasn't been properly setup for IO. We kick off an async process
* to fix it up. The async helper will wait for ordered extents, set
* the delalloc bit and make it safe to write the page.
*/
-int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
+static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct btrfs_writepage_fixup *fixup;
return -EAGAIN;
}
+static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+ struct inode *inode, u64 file_pos,
+ u64 disk_bytenr, u64 disk_num_bytes,
+ u64 num_bytes, u64 ram_bytes,
+ u8 compression, u8 encryption,
+ u16 other_encoding, int extent_type)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct btrfs_key ins;
+ u64 hint;
+ int ret;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+
+ ret = btrfs_drop_extents(trans, root, inode, file_pos,
+ file_pos + num_bytes, file_pos, &hint);
+ BUG_ON(ret);
+
+ ins.objectid = inode->i_ino;
+ ins.offset = file_pos;
+ ins.type = BTRFS_EXTENT_DATA_KEY;
+ ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
+ BUG_ON(ret);
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+ btrfs_set_file_extent_type(leaf, fi, extent_type);
+ btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
+ btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
+ btrfs_set_file_extent_compression(leaf, fi, compression);
+ btrfs_set_file_extent_encryption(leaf, fi, encryption);
+ btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
+ btrfs_mark_buffer_dirty(leaf);
+
+ inode_add_bytes(inode, num_bytes);
+ btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
+
+ ins.objectid = disk_bytenr;
+ ins.offset = disk_num_bytes;
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+ ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
+ root->root_key.objectid,
+ trans->transid, inode->i_ino, &ins);
+ BUG_ON(ret);
+
+ btrfs_free_path(path);
+ return 0;
+}
+
+/* as ordered data IO finishes, this gets called so we can finish
+ * an ordered extent if the range of bytes in the file it covers is
+ * fully written.
+ */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered_extent;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- u64 alloc_hint = 0;
- struct list_head list;
- struct btrfs_key ins;
+ int compressed = 0;
int ret;
ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
ordered_extent->file_offset + ordered_extent->len - 1,
GFP_NOFS);
- INIT_LIST_HEAD(&list);
-
- ins.objectid = ordered_extent->start;
- ins.offset = ordered_extent->len;
- ins.type = BTRFS_EXTENT_ITEM_KEY;
-
- ret = btrfs_alloc_reserved_extent(trans, root, root->root_key.objectid,
- trans->transid, inode->i_ino,
- ordered_extent->file_offset, &ins);
- BUG_ON(ret);
-
- mutex_lock(&BTRFS_I(inode)->extent_mutex);
-
- ret = btrfs_drop_extents(trans, root, inode,
- ordered_extent->file_offset,
- ordered_extent->file_offset +
- ordered_extent->len,
- ordered_extent->file_offset, &alloc_hint);
- BUG_ON(ret);
- ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
- ordered_extent->file_offset,
- ordered_extent->start,
- ordered_extent->len,
- ordered_extent->len, 0);
- BUG_ON(ret);
-
- btrfs_drop_extent_cache(inode, ordered_extent->file_offset,
- ordered_extent->file_offset +
- ordered_extent->len - 1);
- mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-
- inode->i_blocks += ordered_extent->len >> 9;
+ if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
+ compressed = 1;
+ if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
+ BUG_ON(compressed);
+ ret = btrfs_mark_extent_written(trans, root, inode,
+ ordered_extent->file_offset,
+ ordered_extent->file_offset +
+ ordered_extent->len);
+ BUG_ON(ret);
+ } else {
+ ret = insert_reserved_file_extent(trans, inode,
+ ordered_extent->file_offset,
+ ordered_extent->start,
+ ordered_extent->disk_len,
+ ordered_extent->len,
+ ordered_extent->len,
+ compressed, 0, 0,
+ BTRFS_FILE_EXTENT_REG);
+ BUG_ON(ret);
+ }
unlock_extent(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
GFP_NOFS);
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
+ mutex_lock(&BTRFS_I(inode)->extent_mutex);
btrfs_ordered_update_i_size(inode, ordered_extent);
btrfs_update_inode(trans, root, inode);
btrfs_remove_ordered_extent(inode, ordered_extent);
+ mutex_unlock(&BTRFS_I(inode)->extent_mutex);
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
return 0;
}
-int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
+static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
+/*
+ * When IO fails, either with EIO or csum verification fails, we
+ * try other mirrors that might have a good copy of the data. This
+ * io_failure_record is used to record state as we go through all the
+ * mirrors. If another mirror has good data, the page is set up to date
+ * and things continue. If a good mirror can't be found, the original
+ * bio end_io callback is called to indicate things have failed.
+ */
struct io_failure_record {
struct page *page;
u64 start;
u64 len;
u64 logical;
+ unsigned long bio_flags;
int last_mirror;
};
-int btrfs_io_failed_hook(struct bio *failed_bio,
+static int btrfs_io_failed_hook(struct bio *failed_bio,
struct page *page, u64 start, u64 end,
struct extent_state *state)
{
failrec->start = start;
failrec->len = end - start + 1;
failrec->last_mirror = 0;
+ failrec->bio_flags = 0;
spin_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, failrec->len);
}
logical = start - em->start;
logical = em->block_start + logical;
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+ logical = em->block_start;
+ failrec->bio_flags = EXTENT_BIO_COMPRESSED;
+ }
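+ /*
+ * compressed extents can only be re-read as a whole, so the
+ * retry starts at block_start and carries the compressed flag
+ */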
failrec->logical = logical;
free_extent_map(em);
set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
failrec->logical, failrec->len);
failrec->last_mirror++;
if (!state) {
- spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
+ spin_lock(&BTRFS_I(inode)->io_tree.lock);
state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
failrec->start,
EXTENT_LOCKED);
if (state && state->start != failrec->start)
state = NULL;
- spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
+ spin_unlock(&BTRFS_I(inode)->io_tree.lock);
}
if (!state || failrec->last_mirror > num_copies) {
set_state_private(failure_tree, failrec->start, 0);
bio->bi_sector = failrec->logical >> 9;
bio->bi_bdev = failed_bio->bi_bdev;
bio->bi_size = 0;
+
bio_add_page(bio, page, failrec->len, start - page_offset(page));
if (failed_bio->bi_rw & (1 << BIO_RW))
rw = WRITE;
rw = READ;
BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
- failrec->last_mirror);
+ failrec->last_mirror,
+ failrec->bio_flags);
return 0;
}
-int btrfs_clean_io_failures(struct inode *inode, u64 start)
+/*
+ * each time an IO finishes, we do a fast check in the IO failure tree
+ * to see if we need to process or clean up an io_failure_record
+ */
+static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
u64 private;
u64 private_failure;
return 0;
}
-int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
+/*
+ * when reads are done, we need to check csums to verify the data is
+ * correct. If there's a match, we allow the bio to finish. If not, we
+ * go through the io_failure_record routines to find good copies
+ */
+static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
u32 csum = ~(u32)0;
- unsigned long flags;
- if (btrfs_test_opt(root, NODATASUM) ||
- btrfs_test_flag(inode, NODATASUM))
+ if (PageChecked(page)) {
+ ClearPageChecked(page);
+ goto good;
+ }
+ if (btrfs_test_flag(inode, NODATASUM))
+ return 0;
+
+ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
+ test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+ clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
+ GFP_NOFS);
return 0;
+ }
+
if (state && state->start == start) {
private = state->private;
ret = 0;
} else {
ret = get_state_private(io_tree, start, &private);
}
- local_irq_save(flags);
- kaddr = kmap_atomic(page, KM_IRQ0);
- if (ret) {
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (ret)
goto zeroit;
- }
+
csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
btrfs_csum_final(csum, (char *)&csum);
- if (csum != private) {
+ if (csum != private)
goto zeroit;
- }
- kunmap_atomic(kaddr, KM_IRQ0);
- local_irq_restore(flags);
+ kunmap_atomic(kaddr, KM_USER0);
+good:
/* if the io failure tree for this inode is non-empty,
* check to see if we've recovered from a failed IO
*/
return 0;
zeroit:
- printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
- page->mapping->host->i_ino, (unsigned long long)start, csum,
- private);
+ printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
+ "private %llu\n", page->mapping->host->i_ino,
+ (unsigned long long)start, csum,
+ (unsigned long long)private);
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_IRQ0);
- local_irq_restore(flags);
+ kunmap_atomic(kaddr, KM_USER0);
if (private == 0)
return 0;
return -EIO;
struct inode *inode;
int ret = 0, nr_unlink = 0, nr_truncate = 0;
- /* don't do orphan cleanup if the fs is readonly. */
- if (root->inode->i_sb->s_flags & MS_RDONLY)
- return;
-
path = btrfs_alloc_path();
if (!path)
return;
btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
key.offset = (u64)-1;
- trans = btrfs_start_transaction(root, 1);
- btrfs_set_trans_block_group(trans, root->inode);
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
* crossing root thing. we store the inode number in the
* offset of the orphan item.
*/
- inode = btrfs_iget_locked(root->inode->i_sb,
+ inode = btrfs_iget_locked(root->fs_info->sb,
found_key.offset, root);
if (!inode)
break;
* do a destroy_inode
*/
if (is_bad_inode(inode)) {
+ trans = btrfs_start_transaction(root, 1);
btrfs_orphan_del(trans, inode);
+ btrfs_end_transaction(trans, root);
iput(inode);
continue;
}
printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
btrfs_free_path(path);
- btrfs_end_transaction(trans, root);
}
+/*
+ * read an inode from the btree into the in-memory inode
+ */
void btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
- inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
+ inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
+ BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
BTRFS_I(inode)->index_cnt = (u64)-1;
+ BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
- BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
- alloc_group_block);
- BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
- if (!BTRFS_I(inode)->block_group) {
- BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
- NULL, 0,
- BTRFS_BLOCK_GROUP_METADATA, 0);
- }
+ BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
+ alloc_group_block, 0);
btrfs_free_path(path);
inode_item = NULL;
make_bad_inode(inode);
}
+/*
+ * given a leaf and an inode, copy the inode fields into the leaf
+ */
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
inode->i_ctime.tv_nsec);
- btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
+ btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
+ btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
btrfs_set_inode_transid(leaf, item, trans->transid);
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
- btrfs_set_inode_block_group(leaf, item,
- BTRFS_I(inode)->block_group->key.objectid);
+ btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
}
-int noinline btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode)
+/*
+ * copy everything in the in-memory inode into the btree.
+ */
+noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
}
+/*
+ * unlink helper that gets used here in inode.c and in the tree logging
+ * recovery code. It removes a link in a directory with a given name, and
+ * also drops the back refs in the inode to the directory
+ */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, struct inode *inode,
inode->i_ino,
dir->i_ino, &index);
if (ret) {
- printk("failed to delete reference to %.*s, "
+ printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
"inode %lu parent %lu\n", name_len, name,
inode->i_ino, dir->i_ino);
goto err;
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir->i_ino);
- BUG_ON(ret);
+ BUG_ON(ret != 0 && ret != -ENOENT);
+ if (ret != -ENOENT)
+ BTRFS_I(dir)->log_dirty_trans = trans->transid;
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
dir, index);
struct btrfs_trans_handle *trans;
unsigned long nr = 0;
- if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
+ /*
+ * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
+ * the root of a subvolume or snapshot
+ */
+ if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
+ inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
return -ENOTEMPTY;
}
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
- if (!err) {
+ if (!err)
btrfs_i_size_write(inode, 0);
- }
fail_trans:
nr = trans->blocks_used;
return err;
}
+#if 0
+/*
+ * when truncating bytes in a file, it is possible to avoid reading
+ * the leaves that contain only checksum items. This can be the
+ * majority of the IO required to delete a large file, but it must
+ * be done carefully.
+ *
+ * The keys in the level just above the leaves are checked to make sure
+ * the lowest key in a given leaf is a csum key, and starts at an offset
+ * after the new size.
+ *
+ * Then the key for the next leaf is checked to make sure it also has
+ * a checksum item for the same file. If it does, we know our target leaf
+ * contains only checksum items, and it can be safely freed without reading
+ * it.
+ *
+ * This is just an optimization targeted at large files. It may do
+ * nothing. It will return 0 unless things went badly.
+ */
+static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct inode *inode, u64 new_size)
+{
+ struct btrfs_key key;
+ int ret;
+ int nritems;
+ struct btrfs_key found_key;
+ struct btrfs_key other_key;
+ struct btrfs_leaf_ref *ref;
+ u64 leaf_gen;
+ u64 leaf_start;
+
+ path->lowest_level = 1;
+ key.objectid = inode->i_ino;
+ key.type = BTRFS_CSUM_ITEM_KEY;
+ key.offset = new_size;
+again:
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (path->nodes[1] == NULL) {
+ ret = 0;
+ goto out;
+ }
+ ret = 0;
+ btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
+ nritems = btrfs_header_nritems(path->nodes[1]);
+
+ if (!nritems)
+ goto out;
+
+ if (path->slots[1] >= nritems)
+ goto next_node;
+
+ /* did we find a key greater than anything we want to delete? */
+ if (found_key.objectid > inode->i_ino ||
+ (found_key.objectid == inode->i_ino && found_key.type > key.type))
+ goto out;
+
+ /* we check the next key in the node to make sure the leaf contains
+ * only checksum items. This comparison doesn't work if our
+ * leaf is the last one in the node
+ */
+ if (path->slots[1] + 1 >= nritems) {
+next_node:
+ /* search forward from the last key in the node, this
+ * will bring us into the next node in the tree
+ */
+ btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
+
+ /* unlikely, but we inc below, so check to be safe */
+ if (found_key.offset == (u64)-1)
+ goto out;
+
+ /* search_forward needs a path with locks held, do the
+ * search again for the original key. It is possible
+ * this will race with a balance and return a path that
+ * we could modify, but this drop is just an optimization
+ * and is allowed to miss some leaves.
+ */
+ btrfs_release_path(root, path);
+ found_key.offset++;
+
+ /* setup a max key for search_forward */
+ other_key.offset = (u64)-1;
+ other_key.type = key.type;
+ other_key.objectid = key.objectid;
+
+ path->keep_locks = 1;
+ ret = btrfs_search_forward(root, &found_key, &other_key,
+ path, 0, 0);
+ path->keep_locks = 0;
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ key.offset = found_key.offset;
+ btrfs_release_path(root, path);
+ cond_resched();
+ goto again;
+ }
+
+ /* we know there's one more slot after us in the tree,
+ * read that key so we can verify it is also a checksum item
+ */
+ btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
+
+ if (found_key.objectid < inode->i_ino)
+ goto next_key;
+
+ if (found_key.type != key.type || found_key.offset < new_size)
+ goto next_key;
+
+ /*
+ * if the key for the next leaf isn't a csum key from this objectid,
+ * we can't be sure there aren't good items inside this leaf.
+ * Bail out
+ */
+ if (other_key.objectid != inode->i_ino || other_key.type != key.type)
+ goto out;
+
+ leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
+ leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
+ /*
+ * it is safe to delete this leaf, it contains only
+ * csum items from this inode at an offset >= new_size
+ */
+ ret = btrfs_del_leaf(trans, root, path, leaf_start);
+ BUG_ON(ret);
+
+ if (root->ref_cows && leaf_gen < trans->transid) {
+ ref = btrfs_alloc_leaf_ref(root, 0);
+ if (ref) {
+ ref->root_gen = root->root_key.offset;
+ ref->bytenr = leaf_start;
+ ref->owner = 0;
+ ref->generation = leaf_gen;
+ ref->nritems = 0;
+
+ ret = btrfs_add_leaf_ref(root, ref, 0);
+ WARN_ON(ret);
+ btrfs_free_leaf_ref(root, ref);
+ } else {
+ WARN_ON(1);
+ }
+ }
+next_key:
+ btrfs_release_path(root, path);
+
+ if (other_key.objectid == inode->i_ino &&
+ other_key.type == key.type && other_key.offset > key.offset) {
+ key.offset = other_key.offset;
+ cond_resched();
+ goto again;
+ }
+ ret = 0;
+out:
+ /* fixup any changes we've made to the path */
+ path->lowest_level = 0;
+ path->keep_locks = 0;
+ btrfs_release_path(root, path);
+ return ret;
+}
+
+#endif
+
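/*
 * An illustrative sketch (stand-in types and names, not part of this
 * patch): the level-1 test drop_csum_leaves performs can be modeled on
 * plain keys. A leaf may be freed without reading it when its lowest
 * key and the lowest key of the next leaf are both csum keys for the
 * same inode at or past the new size, since every key in the leaf
 * falls between those two bounds.
 */
struct demo_key {
	unsigned long long objectid;	/* inode number */
	unsigned char type;		/* item type, e.g. a csum key */
	unsigned long long offset;	/* byte offset of the csums */
};

static int leaf_holds_only_csums(const struct demo_key *first,
				 const struct demo_key *next_leaf_first,
				 unsigned long long ino,
				 unsigned char csum_type,
				 unsigned long long new_size)
{
	if (first->objectid != ino || first->type != csum_type ||
	    first->offset < new_size)
		return 0;
	if (next_leaf_first->objectid != ino ||
	    next_leaf_first->type != csum_type)
		return 0;
	return 1;
}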
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
- * any higher than i_size.
+ * any higher than new_size.
*
* csum items that cross the new i_size are truncated to the new size
* as well.
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
+ int encoding;
u64 mask = root->sectorsize - 1;
if (root->ref_cows)
- btrfs_drop_extent_cache(inode,
- new_size & (~mask), (u64)-1);
+ btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
key.type = (u8)-1;
btrfs_init_path(path);
+
search_again:
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0) {
+ if (ret < 0)
goto error;
- }
+
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
path->slots[0]--;
}
- while(1) {
+ while (1) {
fi = NULL;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
+ encoding = 0;
if (found_key.objectid != inode->i_ino)
break;
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
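+ /*
+ * compressed, encrypted or otherwise encoded extents can't be
+ * split in place, so fold all the encoding bits into one flag
+ */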
+ encoding = btrfs_file_extent_compression(leaf, fi);
+ encoding |= btrfs_file_extent_encryption(leaf, fi);
+ encoding |= btrfs_file_extent_other_encoding(leaf, fi);
+
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
item_end +=
btrfs_file_extent_num_bytes(leaf, fi);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- struct btrfs_item *item = btrfs_item_nr(leaf,
- path->slots[0]);
item_end += btrfs_file_extent_inline_len(leaf,
- item);
+ fi);
}
item_end--;
}
- if (found_type == BTRFS_CSUM_ITEM_KEY) {
- ret = btrfs_csum_truncate(trans, root, path,
- new_size);
- BUG_ON(ret);
- }
if (item_end < new_size) {
- if (found_type == BTRFS_DIR_ITEM_KEY) {
+ if (found_type == BTRFS_DIR_ITEM_KEY)
found_type = BTRFS_INODE_ITEM_KEY;
- } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
- found_type = BTRFS_CSUM_ITEM_KEY;
- } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
+ else if (found_type == BTRFS_EXTENT_ITEM_KEY)
+ found_type = BTRFS_EXTENT_DATA_KEY;
+ else if (found_type == BTRFS_EXTENT_DATA_KEY)
found_type = BTRFS_XATTR_ITEM_KEY;
- } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
+ else if (found_type == BTRFS_XATTR_ITEM_KEY)
found_type = BTRFS_INODE_REF_KEY;
- } else if (found_type) {
+ else if (found_type)
found_type--;
- } else {
+ else
break;
- }
btrfs_set_key_type(&key, found_type);
goto next;
}
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
- if (!del_item) {
+ if (!del_item && !encoding) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
extent_num_bytes = new_size -
num_dec = (orig_num_bytes -
extent_num_bytes);
if (root->ref_cows && extent_start != 0)
- dec_i_blocks(inode, num_dec);
+ inode_sub_bytes(inode, num_dec);
btrfs_mark_buffer_dirty(leaf);
} else {
extent_num_bytes =
if (extent_start != 0) {
found_extent = 1;
if (root->ref_cows)
- dec_i_blocks(inode, num_dec);
- }
- if (root->ref_cows) {
- root_gen =
- btrfs_header_generation(leaf);
+ inode_sub_bytes(inode, num_dec);
}
+ root_gen = btrfs_header_generation(leaf);
root_owner = btrfs_header_owner(leaf);
}
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- if (!del_item) {
+ /*
+ * we can't truncate inline items that have had
+ * special encodings
+ */
+ if (!del_item &&
+ btrfs_file_extent_compression(leaf, fi) == 0 &&
+ btrfs_file_extent_encryption(leaf, fi) == 0 &&
+ btrfs_file_extent_other_encoding(leaf, fi) == 0) {
u32 size = new_size - found_key.offset;
if (root->ref_cows) {
- dec_i_blocks(inode, item_end + 1 -
- found_key.offset - size);
+ inode_sub_bytes(inode, item_end + 1 -
+ new_size);
}
size =
btrfs_file_extent_calc_inline_size(size);
size, 1);
BUG_ON(ret);
} else if (root->ref_cows) {
- dec_i_blocks(inode, item_end + 1 -
- found_key.offset);
+ inode_sub_bytes(inode, item_end + 1 -
+ found_key.offset);
}
}
delete:
pending_del_nr++;
pending_del_slot = path->slots[0];
} else {
- printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
+ BUG();
}
} else {
break;
if (found_extent) {
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes,
- root_owner,
- root_gen, inode->i_ino,
- found_key.offset, 0);
+ leaf->start, root_owner,
+ root_gen, inode->i_ino, 0);
BUG_ON(ret);
}
next:
return ret;
}
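/*
 * An illustrative sketch (stand-in names and values, not part of this
 * patch) of the backwards type walk the truncate loop performs when an
 * item ends below new_size. The values only need the same relative
 * ordering as the real btrfs key types:
 */
enum demo_item_type {
	DEMO_INODE_ITEM = 1,
	DEMO_INODE_REF = 12,
	DEMO_XATTR_ITEM = 24,
	DEMO_DIR_ITEM = 84,
	DEMO_EXTENT_DATA = 108,
	DEMO_EXTENT_ITEM = 168,
};

static int demo_prev_type(int found_type)
{
	if (found_type == DEMO_DIR_ITEM)
		return DEMO_INODE_ITEM;
	if (found_type == DEMO_EXTENT_ITEM)
		return DEMO_EXTENT_DATA;
	if (found_type == DEMO_EXTENT_DATA)
		return DEMO_XATTR_ITEM;
	if (found_type == DEMO_XATTR_ITEM)
		return DEMO_INODE_REF;
	return found_type - 1;	/* otherwise fall back one type */
}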
-static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
+int btrfs_cont_expand(struct inode *inode, loff_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_map *em;
+ u64 mask = root->sectorsize - 1;
+ u64 hole_start = (inode->i_size + mask) & ~mask;
+ u64 block_end = (size + mask) & ~mask;
+ u64 last_byte;
+ u64 cur_offset;
+ u64 hole_size;
int err;
- err = inode_change_ok(inode, attr);
+ if (size <= hole_start)
+ return 0;
+
+ err = btrfs_check_free_space(root, 1, 0);
if (err)
return err;
- if (S_ISREG(inode->i_mode) &&
- attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-
- u64 mask = root->sectorsize - 1;
- u64 hole_start = (inode->i_size + mask) & ~mask;
- u64 block_end = (attr->ia_size + mask) & ~mask;
- u64 hole_size;
- u64 alloc_hint = 0;
-
- if (attr->ia_size <= hole_start)
- goto out;
-
- err = btrfs_check_free_space(root, 1, 0);
- if (err)
- goto fail;
+ btrfs_truncate_page(inode->i_mapping, inode->i_size);
- btrfs_truncate_page(inode->i_mapping, inode->i_size);
+ while (1) {
+ struct btrfs_ordered_extent *ordered;
+ btrfs_wait_ordered_range(inode, hole_start,
+ block_end - hole_start);
+ lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+ ordered = btrfs_lookup_ordered_extent(inode, hole_start);
+ if (!ordered)
+ break;
+ unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+ btrfs_put_ordered_extent(ordered);
+ }
- hole_size = block_end - hole_start;
- while(1) {
- struct btrfs_ordered_extent *ordered;
- btrfs_wait_ordered_range(inode, hole_start, hole_size);
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
- lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
- ordered = btrfs_lookup_ordered_extent(inode, hole_start);
- if (ordered) {
- unlock_extent(io_tree, hole_start,
- block_end - 1, GFP_NOFS);
- btrfs_put_ordered_extent(ordered);
- } else {
+ cur_offset = hole_start;
+ while (1) {
+ em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+ block_end - cur_offset, 0);
+ BUG_ON(IS_ERR(em) || !em);
+ last_byte = min(extent_map_end(em), block_end);
+ last_byte = (last_byte + mask) & ~mask;
+ if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
+ u64 hint_byte = 0;
+ hole_size = last_byte - cur_offset;
+ err = btrfs_drop_extents(trans, root, inode,
+ cur_offset,
+ cur_offset + hole_size,
+ cur_offset, &hint_byte);
+ if (err)
break;
- }
- }
-
- trans = btrfs_start_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
- mutex_lock(&BTRFS_I(inode)->extent_mutex);
- err = btrfs_drop_extents(trans, root, inode,
- hole_start, block_end, hole_start,
- &alloc_hint);
-
- if (alloc_hint != EXTENT_MAP_INLINE) {
err = btrfs_insert_file_extent(trans, root,
- inode->i_ino,
- hole_start, 0, 0,
- hole_size, 0);
+ inode->i_ino, cur_offset, 0,
+ 0, hole_size, 0, hole_size,
+ 0, 0, 0);
btrfs_drop_extent_cache(inode, hole_start,
- (u64)-1);
- btrfs_check_file(root, inode);
+ last_byte - 1, 0);
}
- mutex_unlock(&BTRFS_I(inode)->extent_mutex);
- btrfs_end_transaction(trans, root);
- unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+ free_extent_map(em);
+ cur_offset = last_byte;
+ if (err || cur_offset >= block_end)
+ break;
+ }
+
+ btrfs_end_transaction(trans, root);
+ unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+ return err;
+}
+
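/*
 * A minimal userspace illustration (assumes a file on a btrfs mount;
 * the path below is made up): growing a file with ftruncate() goes
 * through btrfs_setattr() -> btrfs_cont_expand() above, which inserts
 * explicit zero ("hole") file extents covering old-EOF to new-EOF, so
 * the new tail reads back as zeroes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/somefile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* grow the file by a megabyte past its current size */
	if (ftruncate(fd, 1 << 20) != 0)
		perror("ftruncate");
	close(fd);
	return 0;
}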
+static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int err;
+
+ err = inode_change_ok(inode, attr);
+ if (err)
+ return err;
+
+ if (S_ISREG(inode->i_mode) &&
+ attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
+ err = btrfs_cont_expand(inode, attr->ia_size);
if (err)
return err;
}
-out:
+
err = inode_setattr(inode, attr);
if (!err && ((attr->ia_valid & ATTR_MODE)))
err = btrfs_acl_chmod(inode);
-fail:
return err;
}
btrfs_wait_ordered_range(inode, 0, (u64)-1);
btrfs_i_size_write(inode, 0);
- trans = btrfs_start_transaction(root, 1);
+ trans = btrfs_join_transaction(root, 1);
btrfs_set_trans_block_group(trans, inode);
ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
namelen, 0);
if (IS_ERR(di))
ret = PTR_ERR(di);
- if (!di || IS_ERR(di)) {
+
+ if (!di || IS_ERR(di))
goto out_err;
- }
+
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
btrfs_free_path(path);
bi->i_default_acl = NULL;
bi->generation = 0;
+ bi->sequence = 0;
bi->last_trans = 0;
bi->logged_trans = 0;
bi->delalloc_bytes = 0;
bi->disk_i_size = 0;
bi->flags = 0;
bi->index_cnt = (u64)-1;
+ bi->log_dirty_trans = 0;
extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
extent_io_tree_init(&BTRFS_I(inode)->io_tree,
inode->i_mapping, GFP_NOFS);
inode->i_mapping, GFP_NOFS);
INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
- mutex_init(&BTRFS_I(inode)->csum_mutex);
mutex_init(&BTRFS_I(inode)->extent_mutex);
mutex_init(&BTRFS_I(inode)->log_mutex);
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
- struct btrfs_iget_args *args = opaque;
- return (args->ino == inode->i_ino &&
- args->root == BTRFS_I(inode)->root);
+ struct btrfs_iget_args *args = opaque;
+ return args->ino == inode->i_ino &&
+ args->root == BTRFS_I(inode)->root;
+}
+
+struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
+ struct btrfs_root *root, int wait)
+{
+ struct inode *inode;
+ struct btrfs_iget_args args;
+ args.ino = objectid;
+ args.root = root;
+
+ if (wait) {
+ inode = ilookup5(s, objectid, btrfs_find_actor,
+ (void *)&args);
+ } else {
+ inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
+ (void *)&args);
+ }
+ return inode;
}
struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
return inode;
}
-static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
- struct inode * inode;
+ struct inode *inode;
struct btrfs_inode *bi = BTRFS_I(dir);
struct btrfs_root *root = bi->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
- int ret, new, do_orphan = 0;
+ int ret, new;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
if (IS_ERR(inode))
return ERR_CAST(inode);
-
- /* the inode and parent dir are two different roots */
- if (new && root != sub_root) {
- igrab(inode);
- sub_root->inode = inode;
- do_orphan = 1;
- }
}
+ return inode;
+}
+
+static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct inode *inode;
+
+ if (dentry->d_name.len > BTRFS_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
- if (unlikely(do_orphan))
- btrfs_orphan_cleanup(sub_root);
+ inode = btrfs_lookup_dentry(dir, dentry);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
return d_splice_alias(inode, dentry);
}
return 0;
filp->f_pos = 2;
}
-
path = btrfs_alloc_path();
path->reada = 2;
path->slots[0]++;
}
}
+
advance = 1;
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
+
+ /* is this a reference to our own snapshot? If so
+ * skip it
+ */
+ if (location.type == BTRFS_ROOT_ITEM_KEY &&
+ location.objectid == root->root_key.objectid) {
+ over = 0;
+ goto skip;
+ }
over = filldir(dirent, name_ptr, name_len,
found_key.offset, location.objectid,
d_type);
+skip:
if (name_ptr != tmp_name)
kfree(name_ptr);
if (over)
goto nopos;
-
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
return ret;
}
-/* Kernels earlier than 2.6.28 still have the NFS deadlock where nfsd
- will call the file system's ->lookup() method from within its
- filldir callback, which in turn was called from the file system's
- ->readdir() method. And will deadlock for many file systems. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-
-struct nfshack_dirent {
- u64 ino;
- loff_t offset;
- int namlen;
- unsigned int d_type;
- char name[];
-};
-
-struct nfshack_readdir {
- char *dirent;
- size_t used;
- int full;
-};
-
-
-
-static int btrfs_nfshack_filldir(void *__buf, const char *name, int namlen,
- loff_t offset, u64 ino, unsigned int d_type)
-{
- struct nfshack_readdir *buf = __buf;
- struct nfshack_dirent *de = (void *)(buf->dirent + buf->used);
- unsigned int reclen;
-
- reclen = ALIGN(sizeof(struct nfshack_dirent) + namlen, sizeof(u64));
- if (buf->used + reclen > PAGE_SIZE) {
- buf->full = 1;
- return -EINVAL;
- }
-
- de->namlen = namlen;
- de->offset = offset;
- de->ino = ino;
- de->d_type = d_type;
- memcpy(de->name, name, namlen);
- buf->used += reclen;
-
- return 0;
-}
-
-static int btrfs_nfshack_readdir(struct file *file, void *dirent,
- filldir_t filldir)
-{
- struct nfshack_readdir buf;
- struct nfshack_dirent *de;
- int err;
- int size;
- loff_t offset;
-
- buf.dirent = (void *)__get_free_page(GFP_KERNEL);
- if (!buf.dirent)
- return -ENOMEM;
-
- offset = file->f_pos;
-
- do {
- unsigned int reclen;
-
- buf.used = 0;
- buf.full = 0;
- err = btrfs_real_readdir(file, &buf, btrfs_nfshack_filldir);
- if (err)
- break;
-
- size = buf.used;
-
- if (!size)
- break;
-
- de = (struct nfshack_dirent *)buf.dirent;
- while (size > 0) {
- offset = de->offset;
-
- if (filldir(dirent, de->name, de->namlen, de->offset,
- de->ino, de->d_type))
- goto done;
- offset = file->f_pos;
-
- reclen = ALIGN(sizeof(*de) + de->namlen,
- sizeof(u64));
- size -= reclen;
- de = (struct nfshack_dirent *)((char *)de + reclen);
- }
- } while (buf.full);
-
- done:
- free_page((unsigned long)buf.dirent);
- file->f_pos = offset;
-
- return err;
-}
-#endif
-
int btrfs_write_inode(struct inode *inode, int wait)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret = 0;
- if (root->fs_info->closing > 1)
+ if (root->fs_info->btree_inode == inode)
return 0;
if (wait) {
btrfs_end_transaction(trans, root);
}
+/*
+ * find the highest existing sequence number in a directory
+ * and then set the in-memory index_cnt variable to reflect
+ * free sequence numbers
+ */
static int btrfs_set_inode_index_count(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
return ret;
}
-static int btrfs_set_inode_index(struct inode *dir, struct inode *inode,
- u64 *index)
+/*
+ * helper to find a free sequence number in a given directory. The current
+ * code is very simple; later versions will do smarter things in the btree
+ */
+int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
int ret = 0;
if (BTRFS_I(dir)->index_cnt == (u64)-1) {
ret = btrfs_set_inode_index_count(dir);
- if (ret) {
+ if (ret)
return ret;
- }
}
*index = BTRFS_I(dir)->index_cnt;
struct btrfs_root *root,
struct inode *dir,
const char *name, int name_len,
- u64 ref_objectid,
- u64 objectid,
- struct btrfs_block_group_cache *group,
- int mode, u64 *index)
+ u64 ref_objectid, u64 objectid,
+ u64 alloc_hint, int mode, u64 *index)
{
struct inode *inode;
struct btrfs_inode_item *inode_item;
- struct btrfs_block_group_cache *new_inode_group;
struct btrfs_key *location;
struct btrfs_path *path;
struct btrfs_inode_ref *ref;
return ERR_PTR(-ENOMEM);
if (dir) {
- ret = btrfs_set_inode_index(dir, inode, index);
+ ret = btrfs_set_inode_index(dir, index);
if (ret)
return ERR_PTR(ret);
}
owner = 0;
else
owner = 1;
- new_inode_group = btrfs_find_block_group(root, group, 0,
- BTRFS_BLOCK_GROUP_METADATA, owner);
- if (!new_inode_group) {
- printk("find_block group failed\n");
- new_inode_group = group;
+ BTRFS_I(inode)->block_group =
+ btrfs_find_block_group(root, 0, alloc_hint, owner);
+ if ((mode & S_IFREG)) {
+ if (btrfs_test_opt(root, NODATASUM))
+ btrfs_set_flag(inode, NODATASUM);
+ if (btrfs_test_opt(root, NODATACOW))
+ btrfs_set_flag(inode, NODATACOW);
}
- BTRFS_I(inode)->block_group = new_inode_group;
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
if (objectid > root->highest_inode)
root->highest_inode = objectid;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
inode->i_mode = mode;
inode->i_ino = objectid;
- inode->i_blocks = 0;
+ inode_set_bytes(inode, 0);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
+/*
+ * utility function to add 'inode' into 'parent_inode' with
+ * a given name and a given sequence number.
+ * if 'add_backref' is true, also insert a backref from the
+ * inode to the parent directory.
+ */
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index)
err = btrfs_check_free_space(root, 1, 0);
if (err)
goto fail;
- err = btrfs_set_inode_index(dir, inode, &index);
+ err = btrfs_set_inode_index(dir, &index);
if (err)
goto fail;
return err;
}
+/* helper for btrfs_get_extent. Given an existing extent in the tree,
+ * and an extent that you want to insert, deal with overlap and insert
+ * the new extent into the tree.
+ */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
struct extent_map *existing,
struct extent_map *em,
start_diff = map_start - em->start;
em->start = map_start;
em->len = map_len;
- if (em->block_start < EXTENT_MAP_LAST_BYTE)
+ if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+ !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
em->block_start += start_diff;
+ em->block_len -= start_diff;
+ }
return add_extent_mapping(em_tree, em);
}
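/*
 * A worked example of the overlap math above (stand-alone sketch, not
 * patch code): if the new em spans [4096, 16384) but [4096, 8192) is
 * already in the tree, only the tail [8192, 16384) is inserted. For an
 * uncompressed extent the disk block moves forward by the same amount
 * (and block_len shrinks to match); a compressed extent must keep
 * pointing at the start of its compressed data.
 */
typedef unsigned long long u64;

static void demo_merge(u64 em_start, u64 em_block_start,
		       u64 map_start, u64 map_len, int compressed,
		       u64 *out_start, u64 *out_block_start, u64 *out_len)
{
	u64 start_diff = map_start - em_start;

	*out_start = map_start;		/* keep only the tail */
	*out_len = map_len;
	*out_block_start = em_block_start;
	if (!compressed)
		*out_block_start += start_diff;
}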
+static noinline int uncompress_inline(struct btrfs_path *path,
+ struct inode *inode, struct page *page,
+ size_t pg_offset, u64 extent_offset,
+ struct btrfs_file_extent_item *item)
+{
+ int ret;
+ struct extent_buffer *leaf = path->nodes[0];
+ char *tmp;
+ size_t max_size;
+ unsigned long inline_size;
+ unsigned long ptr;
+
+ WARN_ON(pg_offset != 0);
+ max_size = btrfs_file_extent_ram_bytes(leaf, item);
+ inline_size = btrfs_file_extent_inline_item_len(leaf,
+ btrfs_item_nr(leaf, path->slots[0]));
+ tmp = kmalloc(inline_size, GFP_NOFS);
+ if (!tmp)
+ return -ENOMEM;
+ ptr = btrfs_file_extent_inline_start(item);
+
+ read_extent_buffer(leaf, tmp, ptr, inline_size);
+
+ max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+ ret = btrfs_zlib_decompress(tmp, page, extent_offset,
+ inline_size, max_size);
+ if (ret) {
+ char *kaddr = kmap_atomic(page, KM_USER0);
+ unsigned long copy_size = min_t(u64,
+ PAGE_CACHE_SIZE - pg_offset,
+ max_size - extent_offset);
+ memset(kaddr + pg_offset, 0, copy_size);
+ kunmap_atomic(kaddr, KM_USER0);
+ }
+ kfree(tmp);
+ return 0;
+}
+
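/*
 * A rough userspace analogue of uncompress_inline() above (a sketch;
 * btrfs uses its own zlib workspaces via btrfs_zlib_decompress, not
 * this one-shot helper): the inline item stores the compressed bytes,
 * ram_bytes is the uncompressed length, and decompression expands one
 * into the other.
 */
#include <zlib.h>

static int demo_uncompress(const unsigned char *inline_data,
			   unsigned long inline_size,
			   unsigned char *page_buf,
			   unsigned long ram_bytes)
{
	uLongf out_len = ram_bytes;

	/* zlib's one-shot helper; returns Z_OK (0) on success */
	if (uncompress(page_buf, &out_len, inline_data, inline_size) != Z_OK)
		return -1;
	return 0;
}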
+/*
+ * a bit scary, this does extent mapping from logical file offset to the disk.
+ * the ugly parts come from merging extents from the disk with the in-ram
+ * representation. This gets more complex because of the data=ordered code,
+ * where the in-ram extents might be locked pending data=ordered completion.
+ *
+ * This also copies inline extents directly into the page.
+ */
+
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_trans_handle *trans = NULL;
+ int compressed;
again:
spin_lock(&em_tree->lock);
}
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->start = EXTENT_MAP_HOLE;
+ em->orig_start = EXTENT_MAP_HOLE;
em->len = (u64)-1;
+ em->block_len = (u64)-1;
if (!path) {
path = btrfs_alloc_path();
found_type = btrfs_file_extent_type(leaf, item);
extent_start = found_key.offset;
- if (found_type == BTRFS_FILE_EXTENT_REG) {
+ compressed = btrfs_file_extent_compression(leaf, item);
+ if (found_type == BTRFS_FILE_EXTENT_REG ||
+ found_type == BTRFS_FILE_EXTENT_PREALLOC) {
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
- err = 0;
- if (start < extent_start || start >= extent_end) {
- em->start = start;
- if (start < extent_start) {
- if (start + len <= extent_start)
- goto not_found;
- em->len = extent_end - extent_start;
- } else {
- em->len = len;
+ } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ size_t size;
+ size = btrfs_file_extent_inline_len(leaf, item);
+ extent_end = (extent_start + size + root->sectorsize - 1) &
+ ~((u64)root->sectorsize - 1);
+ }
+
+ if (start >= extent_end) {
+ path->slots[0]++;
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ err = ret;
+ goto out;
}
- goto not_found_em;
+ if (ret > 0)
+ goto not_found;
+ leaf = path->nodes[0];
}
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ if (found_key.objectid != objectid ||
+ found_key.type != BTRFS_EXTENT_DATA_KEY)
+ goto not_found;
+ if (start + len <= found_key.offset)
+ goto not_found;
+ em->start = start;
+ em->len = found_key.offset - start;
+ goto not_found_em;
+ }
+
+ if (found_type == BTRFS_FILE_EXTENT_REG ||
+ found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ em->start = extent_start;
+ em->len = extent_end - extent_start;
+ em->orig_start = extent_start -
+ btrfs_file_extent_offset(leaf, item);
bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
if (bytenr == 0) {
- em->start = extent_start;
- em->len = extent_end - extent_start;
em->block_start = EXTENT_MAP_HOLE;
goto insert;
}
- bytenr += btrfs_file_extent_offset(leaf, item);
- em->block_start = bytenr;
- em->start = extent_start;
- em->len = extent_end - extent_start;
+ if (compressed) {
+ set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ em->block_start = bytenr;
+ em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
+ item);
+ } else {
+ bytenr += btrfs_file_extent_offset(leaf, item);
+ em->block_start = bytenr;
+ em->block_len = em->len;
+ if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
+ set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ }
goto insert;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- u64 page_start;
unsigned long ptr;
char *map;
size_t size;
size_t extent_offset;
size_t copy_size;
- size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
- path->slots[0]));
- extent_end = (extent_start + size + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
- if (start < extent_start || start >= extent_end) {
- em->start = start;
- if (start < extent_start) {
- if (start + len <= extent_start)
- goto not_found;
- em->len = extent_end - extent_start;
- } else {
- em->len = len;
- }
- goto not_found_em;
- }
em->block_start = EXTENT_MAP_INLINE;
-
- if (!page) {
+ if (!page || create) {
em->start = extent_start;
- em->len = size;
+ em->len = extent_end - extent_start;
goto out;
}
- page_start = page_offset(page) + pg_offset;
- extent_offset = page_start - extent_start;
+ size = btrfs_file_extent_inline_len(leaf, item);
+ extent_offset = page_offset(page) + pg_offset - extent_start;
copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
em->len = (copy_size + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
- map = kmap(page);
+ em->orig_start = EXTENT_MAP_INLINE;
+ if (compressed)
+ set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
if (create == 0 && !PageUptodate(page)) {
- read_extent_buffer(leaf, map + pg_offset, ptr,
- copy_size);
+ if (btrfs_file_extent_compression(leaf, item) ==
+ BTRFS_COMPRESS_ZLIB) {
+ ret = uncompress_inline(path, inode, page,
+ pg_offset,
+ extent_offset, item);
+ BUG_ON(ret);
+ } else {
+ map = kmap(page);
+ read_extent_buffer(leaf, map + pg_offset, ptr,
+ copy_size);
+ kunmap(page);
+ }
flush_dcache_page(page);
} else if (create && PageUptodate(page)) {
if (!trans) {
trans = btrfs_join_transaction(root, 1);
goto again;
}
+ map = kmap(page);
write_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
+ kunmap(page);
btrfs_mark_buffer_dirty(leaf);
}
- kunmap(page);
set_extent_uptodate(io_tree, em->start,
extent_map_end(em) - 1, GFP_NOFS);
goto insert;
} else {
- printk("unkknown found_type %d\n", found_type);
+ printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
WARN_ON(1);
}
not_found:
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
+ set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
btrfs_release_path(root, path);
if (em->start > start || extent_map_end(em) <= start) {
- printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
+ printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
+ "[%llu %llu]\n", (unsigned long long)em->start,
+ (unsigned long long)em->len,
+ (unsigned long long)start,
+ (unsigned long long)len);
err = -EIO;
goto out;
}
}
} else {
err = -EIO;
- printk("failing to insert %Lu %Lu\n",
- start, len);
free_extent_map(em);
em = NULL;
}
btrfs_free_path(path);
if (trans) {
ret = btrfs_end_transaction(trans, root);
- if (!err) {
+ if (!err)
err = ret;
- }
}
if (err) {
free_extent_map(em);
return em;
}
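/*
 * A sketch (assumes kernel context; the helper name is made up) of how
 * a caller interprets what btrfs_get_extent() hands back. All of the
 * sentinel block_start values and flag bits used here appear in the
 * function above.
 */
static void demo_classify_extent(struct extent_map *em)
{
	if (em->block_start == EXTENT_MAP_HOLE) {
		/* a hole; with EXTENT_FLAG_VACANCY also set, nothing
		 * at all is in the tree for this range */
	} else if (em->block_start == EXTENT_MAP_INLINE) {
		/* data lives inside the btree leaf (possibly
		 * compressed) and was copied into the page */
	} else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		/* block_start/block_len cover the compressed bytes on
		 * disk; em->len is the uncompressed length */
	} else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		/* preallocated space: on disk but never written */
	}
}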
-#if 0 /* waiting for O_DIRECT reads */
-static int btrfs_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- struct extent_map *em;
- u64 start = (u64)iblock << inode->i_blkbits;
- struct btrfs_multi_bio *multi = NULL;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 len;
- u64 logical;
- u64 map_length;
- int ret = 0;
-
- em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
-
- if (!em || IS_ERR(em))
- goto out;
-
- if (em->start > start || em->start + em->len <= start) {
- goto out;
- }
-
- if (em->block_start == EXTENT_MAP_INLINE) {
- ret = -EINVAL;
- goto out;
- }
-
- len = em->start + em->len - start;
- len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
-
- if (em->block_start == EXTENT_MAP_HOLE ||
- em->block_start == EXTENT_MAP_DELALLOC) {
- bh_result->b_size = len;
- goto out;
- }
-
- logical = start - em->start;
- logical = em->block_start + logical;
-
- map_length = len;
- ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
- logical, &map_length, &multi, 0);
- BUG_ON(ret);
- bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
- bh_result->b_size = min(map_length, len);
-
- bh_result->b_bdev = multi->stripes[0].dev->bdev;
- set_buffer_mapped(bh_result);
- kfree(multi);
-out:
- free_extent_map(em);
- return ret;
-}
-#endif
-
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
return -EINVAL;
-#if 0
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
-
- if (rw == WRITE)
- return -EINVAL;
-
- return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, btrfs_get_block, NULL);
-#endif
}
static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
struct writeback_control *wbc)
{
struct extent_io_tree *tree;
+
tree = &BTRFS_I(mapping->host)->io_tree;
return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
+ if (PageWriteback(page) || PageDirty(page))
+ return 0;
return __btrfs_releasepage(page, gfp_flags);
}
}
/*
- * Invalidate a single dcache entry at the root of the filesystem.
- * Needed after creation of snapshot or subvolume.
+ * create a new subvolume directory/inode (helper for the ioctl).
*/
-void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
- int namelen)
-{
- struct dentry *alias, *entry;
- struct qstr qstr;
-
- alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
- if (alias) {
- qstr.name = name;
- qstr.len = namelen;
- /* change me if btrfs ever gets a d_hash operation */
- qstr.hash = full_name_hash(qstr.name, qstr.len);
- entry = d_lookup(alias, &qstr);
- dput(alias);
- if (entry) {
- d_invalidate(entry);
- dput(entry);
- }
- }
-}
-
-int btrfs_create_subvol_root(struct btrfs_root *new_root,
- struct btrfs_trans_handle *trans, u64 new_dirid,
- struct btrfs_block_group_cache *block_group)
+int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *new_root, struct dentry *dentry,
+ u64 new_dirid, u64 alloc_hint)
{
struct inode *inode;
+ int error;
u64 index = 0;
inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
- new_dirid, block_group, S_IFDIR | 0700, &index);
+ new_dirid, alloc_hint, S_IFDIR | 0700, &index);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
- new_root->inode = inode;
inode->i_nlink = 1;
btrfs_i_size_write(inode, 0);
- return btrfs_update_inode(trans, new_root, inode);
+ error = btrfs_update_inode(trans, new_root, inode);
+ if (error)
+ return error;
+
+ d_instantiate(dentry, inode);
+ return 0;
}
+/* helper function for file defrag and space balancing. This
+ * forces readahead on a given range of bytes in an inode
+ */
unsigned long btrfs_force_ra(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
pgoff_t offset, pgoff_t last_index)
{
pgoff_t req_size = last_index - offset + 1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
- offset = page_cache_readahead(mapping, ra, file, offset, req_size);
- return offset;
-#else
page_cache_sync_readahead(mapping, ra, file, offset, req_size);
return offset + req_size;
-#endif
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
}
spin_unlock(&BTRFS_I(inode)->root->list_lock);
- while(1) {
+ while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
if (!ordered)
break;
else {
- printk("found ordered extent %Lu %Lu\n",
- ordered->file_offset, ordered->len);
+ printk(KERN_ERR "btrfs found ordered "
+ "extent %llu %llu on inode cleanup\n",
+ (unsigned long long)ordered->file_offset,
+ (unsigned long long)ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
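/* one reference came from the lookup, the other was held by the tree */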
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
}
- btrfs_drop_extent_cache(inode, 0, (u64)-1);
+ btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
static void init_once(void *foo)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
-static void init_once(struct kmem_cache * cachep, void *foo)
-#else
-static void init_once(void * foo, struct kmem_cache * cachep,
- unsigned long flags)
-#endif
{
struct btrfs_inode *ei = (struct btrfs_inode *) foo;
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
unsigned long extra_flags,
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
- void (*ctor)(void *)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
- void (*ctor)(struct kmem_cache *, void *)
-#else
- void (*ctor)(void *, struct kmem_cache *,
- unsigned long)
-#endif
- )
+ void (*ctor)(void *))
{
return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD | extra_flags), ctor
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
- ,NULL
-#endif
- );
+ SLAB_MEM_SPREAD | extra_flags), ctor);
}
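/*
 * A sketch of the expected call from btrfs_init_cachep below (the
 * cache name follows the existing convention):
 *
 *	btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
 *						sizeof(struct btrfs_inode),
 *						0, init_once);
 */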
int btrfs_init_cachep(void)
{
struct inode *inode = dentry->d_inode;
generic_fillattr(inode, stat);
+ stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
stat->blksize = PAGE_CACHE_SIZE;
- stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
+ stat->blocks = (inode_get_bytes(inode) +
+ BTRFS_I(inode)->delalloc_bytes) >> 9;
return 0;
}
-static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
- struct inode * new_dir,struct dentry *new_dentry)
+static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
u64 index = 0;
int ret;
+ /* we're not allowed to rename between subvolumes */
+ if (BTRFS_I(old_inode)->root->root_key.objectid !=
+ BTRFS_I(new_dir)->root->root_key.objectid)
+ return -EXDEV;
+
if (S_ISDIR(old_inode->i_mode) && new_inode &&
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
return -ENOTEMPTY;
}
+ /* to rename a snapshot or subvolume, we need to juggle the
+ * backrefs. This isn't coded yet
+ */
+ if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ return -EXDEV;
+
ret = btrfs_check_free_space(root, 1, 0);
if (ret)
goto out_unlock;
}
}
- ret = btrfs_set_inode_index(new_dir, old_inode, &index);
+ ret = btrfs_set_inode_index(new_dir, &index);
if (ret)
goto out_fail;
return ret;
}
+/*
+ * some fairly slow code that needs optimization. This walks the list
+ * of all the inodes with pending delalloc and forces them to disk.
+ */
int btrfs_start_delalloc_inodes(struct btrfs_root *root)
{
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
- unsigned long flags;
+ struct inode *inode;
- spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
- while(!list_empty(head)) {
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ spin_lock(&root->fs_info->delalloc_lock);
+ while (!list_empty(head)) {
binode = list_entry(head->next, struct btrfs_inode,
delalloc_inodes);
- atomic_inc(&binode->vfs_inode.i_count);
- spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
- filemap_write_and_wait(binode->vfs_inode.i_mapping);
- iput(&binode->vfs_inode);
- spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
+ inode = igrab(&binode->vfs_inode);
+ if (!inode)
+ list_del_init(&binode->delalloc_inodes);
+ spin_unlock(&root->fs_info->delalloc_lock);
+ if (inode) {
+ filemap_flush(inode->i_mapping);
+ iput(inode);
+ }
+ cond_resched();
+ spin_lock(&root->fs_info->delalloc_lock);
+ }
+ spin_unlock(&root->fs_info->delalloc_lock);
+
+ /* the filemap_flush will queue IO into the worker threads, but
+ * we have to make sure the IO is actually started and that
+ * ordered extents get created before we return
+ */
+ atomic_inc(&root->fs_info->async_submit_draining);
+ while (atomic_read(&root->fs_info->nr_async_submits) ||
+ atomic_read(&root->fs_info->async_delalloc_pages)) {
+ wait_event(root->fs_info->async_submit_wait,
+ (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
+ atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
- spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+ atomic_dec(&root->fs_info->async_submit_draining);
return 0;
}
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei,
BTRFS_FILE_EXTENT_INLINE);
+ btrfs_set_file_extent_encryption(leaf, ei, 0);
+ btrfs_set_file_extent_compression(leaf, ei, 0);
+ btrfs_set_file_extent_other_encoding(leaf, ei, 0);
+ btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
+
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+ inode_set_bytes(inode, name_len);
btrfs_i_size_write(inode, name_len - 1);
err = btrfs_update_inode(trans, root, inode);
if (err)
return err;
}
+static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
+ u64 alloc_hint, int mode)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_key ins;
+ u64 alloc_size;
+ u64 cur_offset = start;
+ u64 num_bytes = end - start;
+ int ret = 0;
+
+ trans = btrfs_join_transaction(root, 1);
+ BUG_ON(!trans);
+ btrfs_set_trans_block_group(trans, inode);
+
+ while (num_bytes > 0) {
+ alloc_size = min(num_bytes, root->fs_info->max_extent);
+ ret = btrfs_reserve_extent(trans, root, alloc_size,
+ root->sectorsize, 0, alloc_hint,
+ (u64)-1, &ins, 1);
+ if (ret) {
+ WARN_ON(1);
+ goto out;
+ }
+ ret = insert_reserved_file_extent(trans, inode,
+ cur_offset, ins.objectid,
+ ins.offset, ins.offset,
+ ins.offset, 0, 0, 0,
+ BTRFS_FILE_EXTENT_PREALLOC);
+ BUG_ON(ret);
+ num_bytes -= ins.offset;
+ cur_offset += ins.offset;
+ alloc_hint = ins.objectid + ins.offset;
+ }
+out:
+ if (cur_offset > start) {
+ inode->i_ctime = CURRENT_TIME;
+ btrfs_set_flag(inode, PREALLOC);
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ cur_offset > i_size_read(inode))
+ btrfs_i_size_write(inode, cur_offset);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ }
+
+ btrfs_end_transaction(trans, root);
+ return ret;
+}
+
+static long btrfs_fallocate(struct inode *inode, int mode,
+ loff_t offset, loff_t len)
+{
+ u64 cur_offset;
+ u64 last_byte;
+ u64 alloc_start;
+ u64 alloc_end;
+ u64 alloc_hint = 0;
+ u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+ struct extent_map *em;
+ int ret;
+
+ alloc_start = offset & ~mask;
+ alloc_end = (offset + len + mask) & ~mask;
+
+ mutex_lock(&inode->i_mutex);
+ if (alloc_start > inode->i_size) {
+ ret = btrfs_cont_expand(inode, alloc_start);
+ if (ret)
+ goto out;
+ }
+
+ while (1) {
+ struct btrfs_ordered_extent *ordered;
+ lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
+ alloc_end - 1, GFP_NOFS);
+ ordered = btrfs_lookup_first_ordered_extent(inode,
+ alloc_end - 1);
+ if (ordered &&
+ ordered->file_offset + ordered->len > alloc_start &&
+ ordered->file_offset < alloc_end) {
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent(&BTRFS_I(inode)->io_tree,
+ alloc_start, alloc_end - 1, GFP_NOFS);
+ btrfs_wait_ordered_range(inode, alloc_start,
+ alloc_end - alloc_start);
+ } else {
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ break;
+ }
+ }
+
+ cur_offset = alloc_start;
+ while (1) {
+ em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+ alloc_end - cur_offset, 0);
+ BUG_ON(IS_ERR(em) || !em);
+ last_byte = min(extent_map_end(em), alloc_end);
+ last_byte = (last_byte + mask) & ~mask;
+ if (em->block_start == EXTENT_MAP_HOLE) {
+ ret = prealloc_file_range(inode, cur_offset,
+ last_byte, alloc_hint, mode);
+ if (ret < 0) {
+ free_extent_map(em);
+ break;
+ }
+ }
+ if (em->block_start <= EXTENT_MAP_LAST_BYTE)
+ alloc_hint = em->block_start;
+ free_extent_map(em);
+
+ cur_offset = last_byte;
+ if (cur_offset >= alloc_end) {
+ ret = 0;
+ break;
+ }
+ }
+ unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
+ GFP_NOFS);
+out:
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
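/*
 * Userspace view of the fallocate support added above (assumes a file
 * on a btrfs mount; the path is made up). With FALLOC_FL_KEEP_SIZE the
 * blocks are preallocated via prealloc_file_range() but i_size is left
 * alone, matching the mode check at the end of that function.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/prealloc-demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* reserve 4MiB on disk without changing the file size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 << 20) != 0)
		perror("fallocate");
	close(fd);
	return 0;
}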
static int btrfs_set_page_dirty(struct page *page)
{
return __set_page_dirty_nobuffers(page);
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
static int btrfs_permission(struct inode *inode, int mask)
-#else
-static int btrfs_permission(struct inode *inode, int mask,
- struct nameidata *nd)
-#endif
{
if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
return -EACCES;
}
static struct inode_operations btrfs_dir_inode_operations = {
+ .getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
.unlink = btrfs_unlink,
static struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- .readdir = btrfs_nfshack_readdir,
-#else /* NFSd readdir/lookup deadlock is fixed */
.readdir = btrfs_real_readdir,
-#endif
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
+ .fallocate = btrfs_fallocate,
};
static struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,