#include <linux/buffer_head.h> // for block_sync_page
#include <linux/workqueue.h>
#include <linux/kthread.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
-#else
-# include <linux/sched.h>
-#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
+/*
+ * end_io_wq structs are used to do processing in task context when an IO is
+ * complete. This is used during reads to verify checksums, and it is used
+ * by writes to insert metadata for new file extents after IO is complete.
+ */
struct end_io_wq {
struct bio *bio;
bio_end_io_t *end_io;
struct btrfs_work work;
};
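+/*
+ * end_workqueue_bio() saves the bio's original bi_end_io and bi_private
+ * in this struct and requeues the completion into the endio workers;
+ * end_workqueue_fn() restores both fields and calls the original end_io
+ * from task context, where the checksum work is free to sleep.
+ */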
+/*
+ * async submit bios are used to offload expensive checksumming
+ * onto the worker threads. They checksum file and metadata bios
+ * just before they are sent down the IO stack.
+ */
struct async_submit_bio {
struct inode *inode;
struct bio *bio;
struct list_head list;
- extent_submit_bio_hook_t *submit_bio_hook;
+ extent_submit_bio_hook_t *submit_bio_start;
+ extent_submit_bio_hook_t *submit_bio_done;
int rw;
int mirror_num;
+ unsigned long bio_flags;
struct btrfs_work work;
};
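+/*
+ * submit_bio_start runs in a worker thread to do the expensive
+ * checksumming, and submit_bio_done runs from the ordered callbacks to
+ * map and submit the bio. bio_flags is handed through to both hooks
+ * (it carries per-bio state such as the compressed-bio flag).
+ */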
+/*
+ * extents on the btree inode are pretty simple: there's one extent
+ * that covers the entire device
+ */
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
size_t page_offset, u64 start, u64 len,
int create)
}
em->start = 0;
em->len = (u64)-1;
+ em->block_len = (u64)-1;
em->block_start = 0;
em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
*(__le32 *)result = ~cpu_to_le32(crc);
}
+/*
+ * compute the csum for a btree block, and either verify it or write it
+ * into the csum field of the block.
+ */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
int verify)
{
return 0;
}
+/*
+ * we can't consider a given block up to date unless the transid of the
+ * block matches the transid in the parent node's pointer. This is how we
+ * detect blocks that either didn't get written at all or got written
+ * in the wrong place.
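+ *
+ * the expected transid is passed down from the parent: the
+ * read_tree_block() calls below now supply the generation recorded in
+ * the root item or super block instead of 0, so this check can fire.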
+ */
static int verify_parent_transid(struct extent_io_tree *io_tree,
struct extent_buffer *eb, u64 parent_transid)
{
unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
GFP_NOFS);
return ret;
-
}
+/*
+ * helper to read a given tree block, doing retries as required when
+ * the checksums don't match and we have alternate mirrors to try.
+ */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
struct extent_buffer *eb,
u64 start, u64 parent_transid)
return -EIO;
}
+/*
+ * checksum a dirty tree block before IO. This has extra checks to make
+ * sure we only fill in the checksum field in the first page of a
+ * multi-page block.
+ */
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
struct extent_io_tree *tree;
return 0;
}
-static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
-{
- struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-
- csum_dirty_buffer(root, page);
- return 0;
-}
-
int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
found_start = btrfs_header_bytenr(eb);
- if (0 && found_start != start) {
+ if (found_start != start) {
printk("bad tree block start %llu %llu\n",
(unsigned long long)found_start,
(unsigned long long)eb->start);
return ret;
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
-#else
-static int end_workqueue_bio(struct bio *bio,
- unsigned int bytes_done, int err)
-#endif
{
struct end_io_wq *end_io_wq = bio->bi_private;
struct btrfs_fs_info *fs_info;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- if (bio->bi_size)
- return 1;
-#endif
-
fs_info = end_io_wq->info;
end_io_wq->error = err;
end_io_wq->work.func = end_workqueue_fn;
&end_io_wq->work);
else
btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- return 0;
-#endif
}
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
btrfs_async_submit_limit(info);
}
-static void run_one_async_submit(struct btrfs_work *work)
+static void run_one_async_start(struct btrfs_work *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct async_submit_bio *async;
+
+ async = container_of(work, struct async_submit_bio, work);
+ fs_info = BTRFS_I(async->inode)->root->fs_info;
+ async->submit_bio_start(async->inode, async->rw, async->bio,
+ async->mirror_num, async->bio_flags);
+}
+
+static void run_one_async_done(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info;
struct async_submit_bio *async;
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
- async->submit_bio_hook(async->inode, async->rw, async->bio,
- async->mirror_num);
+ async->submit_bio_done(async->inode, async->rw, async->bio,
+ async->mirror_num, async->bio_flags);
+}
+
+static void run_one_async_free(struct btrfs_work *work)
+{
+ struct async_submit_bio *async;
+
+ async = container_of(work, struct async_submit_bio, work);
kfree(async);
}
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
int rw, struct bio *bio, int mirror_num,
- extent_submit_bio_hook_t *submit_bio_hook)
+ unsigned long bio_flags,
+ extent_submit_bio_hook_t *submit_bio_start,
+ extent_submit_bio_hook_t *submit_bio_done)
{
struct async_submit_bio *async;
int limit = btrfs_async_submit_limit(fs_info);
async->rw = rw;
async->bio = bio;
async->mirror_num = mirror_num;
- async->submit_bio_hook = submit_bio_hook;
- async->work.func = run_one_async_submit;
+ async->submit_bio_start = submit_bio_start;
+ async->submit_bio_done = submit_bio_done;
+
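+ /*
+ * work.func may run on any worker thread; the ordered_func and
+ * ordered_free callbacks are run in the order the works were queued,
+ * so checksums are computed in parallel while the bios still reach
+ * the devices in submission order.
+ */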
+ async->work.func = run_one_async_start;
+ async->work.ordered_func = run_one_async_done;
+ async->work.ordered_free = run_one_async_free;
+
async->work.flags = 0;
+ async->bio_flags = bio_flags;
+
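+ /*
+ * async_submit_draining is set by callers that need all pending
+ * async submits flushed out (the delalloc flushing code, for
+ * instance); while it is set, wait for the queue to empty instead
+ * of piling on more work.
+ */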
+ while (atomic_read(&fs_info->async_submit_draining) &&
+ atomic_read(&fs_info->nr_async_submits)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->nr_async_submits) == 0));
+ }
+
atomic_inc(&fs_info->nr_async_submits);
btrfs_queue_worker(&fs_info->workers, &async->work);
(atomic_read(&fs_info->nr_async_bios) < limit),
HZ/10);
}
+
+ while (atomic_read(&fs_info->async_submit_draining) &&
+ atomic_read(&fs_info->nr_async_submits)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->nr_async_submits) == 0));
+ }
+
return 0;
}
-static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
- int mirror_num)
+static int btree_csum_one_bio(struct bio *bio)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 offset;
- int ret;
+ struct bio_vec *bvec = bio->bi_io_vec;
+ int bio_index = 0;
+ struct btrfs_root *root;
- offset = bio->bi_sector << 9;
+ WARN_ON(bio->bi_vcnt <= 0);
+ while (bio_index < bio->bi_vcnt) {
+ root = BTRFS_I(bvec->bv_page->mapping->host)->root;
+ csum_dirty_buffer(root, bvec->bv_page);
+ bio_index++;
+ bvec++;
+ }
+ return 0;
+}
+static int __btree_submit_bio_start(struct inode *inode, int rw,
+ struct bio *bio, int mirror_num,
+ unsigned long bio_flags)
+{
- /*
- * when we're called for a write, we're already in the async
- * submission context. Just jump into btrfs_map_bio
- */
+ /*
+ * when we're called for a write, we're already in the async
+ * submission context. Just checksum here; the bio is mapped and
+ * submitted later by __btree_submit_bio_done.
+ */
- if (rw & (1 << BIO_RW)) {
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
- mirror_num, 1);
- }
+ btree_csum_one_bio(bio);
+ return 0;
+}
+static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+ int mirror_num, unsigned long bio_flags)
+{
/*
- * called for a read, do the setup so that checksum validation
- * can happen in the async kernel threads
+ * when we're called for a write, we're already in the async
+ * submission context. Just jump into btrfs_map_bio
*/
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
- BUG_ON(ret);
-
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}
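+/*
+ * note the asymmetry in btree_submit_bio_hook below: read checksums can
+ * only be verified once the IO completes, so reads register an endio
+ * worker and are submitted directly, while writes must be checksummed
+ * up front and so take the async submit path.
+ */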
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
- int mirror_num)
+ int mirror_num, unsigned long bio_flags)
{
/*
* kthread helpers are used to submit writes so that checksumming
* can happen in parallel across all CPUs
*/
if (!(rw & (1 << BIO_RW))) {
- return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
+ int ret;
+ /*
+ * called for a read, do the setup so that checksum validation
+ * can happen in the async kernel threads
+ */
+ ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+ bio, 1);
+ BUG_ON(ret);
+
+ return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+ mirror_num, 1);
}
return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, rw, bio, mirror_num,
- __btree_submit_bio_hook);
+ inode, rw, bio, mirror_num, 0,
+ __btree_submit_bio_start,
+ __btree_submit_bio_done);
}
static int btree_writepage(struct page *page, struct writeback_control *wbc)
if (wbc->sync_mode == WB_SYNC_NONE) {
u64 num_dirty;
u64 start = 0;
- unsigned long thresh = 8 * 1024 * 1024;
+ unsigned long thresh = 32 * 1024 * 1024;
if (wbc->for_kupdate)
return 0;
int btrfs_write_tree_block(struct extent_buffer *buf)
{
return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
- buf->start + buf->len - 1, WB_SYNC_NONE);
+ buf->start + buf->len - 1, WB_SYNC_ALL);
}
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
int ret;
u32 blocksize;
+ u64 generation;
__setup_root(tree_root->nodesize, tree_root->leafsize,
tree_root->sectorsize, tree_root->stripesize,
&root->root_item, &root->root_key);
BUG_ON(ret);
+ generation = btrfs_root_generation(&root->root_item);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
- blocksize, 0);
+ blocksize, generation);
BUG_ON(!root->node);
return 0;
}
root->ref_cows = 0;
root->node = btrfs_alloc_free_block(trans, root, root->leafsize,
- BTRFS_TREE_LOG_OBJECTID,
- 0, 0, 0, 0, 0);
+ 0, BTRFS_TREE_LOG_OBJECTID,
+ trans->transid, 0, 0, 0);
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
struct btrfs_path *path;
struct extent_buffer *l;
u64 highest_inode;
+ u64 generation;
u32 blocksize;
int ret = 0;
kfree(root);
return ERR_PTR(ret);
}
+ generation = btrfs_root_generation(&root->root_item);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
- blocksize, 0);
+ blocksize, generation);
BUG_ON(!root->node);
insert:
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
return;
inode = mapping->host;
+
+ /*
+ * don't do the expensive searching for a small number of
+ * devices
+ */
+ if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
+ __unplug_io_fn(bdi, page);
+ return;
+ }
+
offset = page_offset(page);
em_tree = &BTRFS_I(inode)->extent_tree;
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
bdi_init(bdi);
-#endif
bdi->ra_pages = default_backing_dev_info.ra_pages;
bdi->state = 0;
bdi->capabilities = default_backing_dev_info.capabilities;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
kfree(end_io_wq);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- bio_endio(bio, bio->bi_size, error);
-#else
bio_endio(bio, error);
-#endif
}
static int cleaner_kthread(void *arg)
u32 leafsize;
u32 blocksize;
u32 stripesize;
+ u64 generation;
struct buffer_head *bh;
struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_super_block *disk_super;
- if (!extent_root || !tree_root || !fs_info) {
+ if (!extent_root || !tree_root || !fs_info ||
+ !chunk_root || !dev_root) {
err = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&fs_info->space_info);
btrfs_mapping_init(&fs_info->mapping_tree);
atomic_set(&fs_info->nr_async_submits, 0);
+ atomic_set(&fs_info->async_delalloc_pages, 0);
+ atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->throttles, 0);
atomic_set(&fs_info->throttle_gen, 0);
fs_info->btree_inode = new_inode(sb);
fs_info->btree_inode->i_ino = 1;
fs_info->btree_inode->i_nlink = 1;
+
fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
INIT_LIST_HEAD(&fs_info->ordered_extents);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
- extent_io_tree_init(&fs_info->free_space_cache,
- fs_info->btree_inode->i_mapping, GFP_NOFS);
- extent_io_tree_init(&fs_info->block_group_cache,
- fs_info->btree_inode->i_mapping, GFP_NOFS);
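+ /*
+ * the block group cache moves out of the extent_io trees and into an
+ * rbtree protected by a spinlock
+ */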
+ spin_lock_init(&fs_info->block_group_cache_lock);
+ fs_info->block_group_cache_tree.rb_node = NULL;
+
extent_io_tree_init(&fs_info->pinned_extents,
fs_info->btree_inode->i_mapping, GFP_NOFS);
extent_io_tree_init(&fs_info->pending_del,
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->do_barriers = 1;
+ INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
+ btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
+ btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
+
BTRFS_I(fs_info->btree_inode)->root = tree_root;
memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
sizeof(struct btrfs_key));
mutex_init(&fs_info->trans_mutex);
mutex_init(&fs_info->tree_log_mutex);
mutex_init(&fs_info->drop_mutex);
- mutex_init(&fs_info->alloc_mutex);
+ mutex_init(&fs_info->extent_ins_mutex);
+ mutex_init(&fs_info->pinned_mutex);
mutex_init(&fs_info->chunk_mutex);
mutex_init(&fs_info->transaction_kthread_mutex);
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
+ mutex_init(&fs_info->tree_reloc_mutex);
init_waitqueue_head(&fs_info->transaction_throttle);
init_waitqueue_head(&fs_info->transaction_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
*/
btrfs_init_workers(&fs_info->workers, "worker",
fs_info->thread_pool_size);
+
+ btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
+ fs_info->thread_pool_size);
+
btrfs_init_workers(&fs_info->submit_workers, "submit",
min_t(u64, fs_devices->num_devices,
fs_info->thread_pool_size));
*/
fs_info->submit_workers.idle_thresh = 64;
- /* fs_info->workers is responsible for checksumming file data
- * blocks and metadata. Using a larger idle thresh allows each
- * worker thread to operate on things in roughly the order they
- * were sent by the writeback daemons, improving overall locality
- * of the IO going down the pipe.
- */
- fs_info->workers.idle_thresh = 128;
+ fs_info->workers.idle_thresh = 16;
+ fs_info->workers.ordered = 1;
+
+ fs_info->delalloc_workers.idle_thresh = 2;
+ fs_info->delalloc_workers.ordered = 1;
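+
+ /*
+ * an ordered pool runs each work's ordered_func in the order the
+ * works were queued; idle_thresh is roughly the pending count at
+ * which a thread stops looking idle, so smaller values fan work out
+ * to more threads sooner.
+ */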
btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
btrfs_init_workers(&fs_info->endio_workers, "endio",
btrfs_start_workers(&fs_info->workers, 1);
btrfs_start_workers(&fs_info->submit_workers, 1);
+ btrfs_start_workers(&fs_info->delalloc_workers, 1);
btrfs_start_workers(&fs_info->fixup_workers, 1);
btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
btrfs_start_workers(&fs_info->endio_write_workers,
}
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
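+ /* make sure readahead covers at least 4MB regardless of device count */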
+ fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
+ 4 * 1024 * 1024 / PAGE_CACHE_SIZE);
nodesize = btrfs_super_nodesize(disk_super);
leafsize = btrfs_super_leafsize(disk_super);
blocksize = btrfs_level_size(tree_root,
btrfs_super_chunk_root_level(disk_super));
+ generation = btrfs_super_chunk_root_generation(disk_super);
__setup_root(nodesize, leafsize, sectorsize, stripesize,
chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
chunk_root->node = read_tree_block(chunk_root,
btrfs_super_chunk_root(disk_super),
- blocksize, 0);
+ blocksize, generation);
BUG_ON(!chunk_root->node);
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
blocksize = btrfs_level_size(tree_root,
btrfs_super_root_level(disk_super));
-
+ generation = btrfs_super_generation(disk_super);
tree_root->node = read_tree_block(tree_root,
btrfs_super_root(disk_super),
- blocksize, 0);
+ blocksize, generation);
if (!tree_root->node)
goto fail_sb_buffer;
log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
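+ /*
+ * the log tree was written by the transaction that followed the last
+ * commit, so its blocks should carry generation + 1
+ */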
log_tree_root->node = read_tree_block(tree_root, bytenr,
- blocksize, 0);
+ blocksize,
+ generation + 1);
ret = btrfs_recover_log_trees(log_tree_root);
BUG_ON(ret);
}
fs_info->last_trans_committed = btrfs_super_generation(disk_super);
+
+ ret = btrfs_cleanup_reloc_trees(tree_root);
+ BUG_ON(ret);
+
return tree_root;
fail_cleaner:
fail_sys_array:
fail_sb_buffer:
btrfs_stop_workers(&fs_info->fixup_workers);
+ btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
kfree(extent_root);
kfree(tree_root);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
bdi_destroy(&fs_info->bdi);
-#endif
kfree(fs_info);
+ kfree(chunk_root);
+ kfree(dev_root);
return ERR_PTR(err);
}
truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
btrfs_stop_workers(&fs_info->fixup_workers);
+ btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
bdi_destroy(&fs_info->bdi);
-#endif
kfree(fs_info->extent_root);
kfree(fs_info->tree_root);
struct extent_io_tree *tree;
u64 num_dirty;
u64 start = 0;
- unsigned long thresh = 96 * 1024 * 1024;
+ unsigned long thresh = 32 * 1024 * 1024;
tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
if (current_is_pdflush() || current->flags & PF_MEMALLOC)
static struct extent_io_ops btree_extent_io_ops = {
.write_cache_pages_lock_hook = btree_lock_page_hook,
- .writepage_io_hook = btree_writepage_io_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
.submit_bio_hook = btree_submit_bio_hook,
/* note we're sharing with inode.c for the merge bio hook */