 * Boston, MA 02111-1307, USA.
*/
+#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> // for block_sync_page
#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+# include <linux/freezer.h>
+#else
+# include <linux/sched.h>
+#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
+#include "async-thread.h"
+#include "locking.h"
#if 0
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
#endif
static struct extent_io_ops btree_extent_io_ops;
-static struct workqueue_struct *end_io_workqueue;
-static struct workqueue_struct *async_submit_workqueue;
+static void end_workqueue_fn(struct btrfs_work *work);
struct end_io_wq {
struct bio *bio;
int error;
int metadata;
struct list_head list;
+ struct btrfs_work work;
};
struct async_submit_bio {
extent_submit_bio_hook_t *submit_bio_hook;
int rw;
int mirror_num;
+ struct btrfs_work work;
};
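Both request structs above embed a struct btrfs_work rather than sitting on an ad-hoc list, so a pool thread can recover the enclosing request from the work pointer with container_of(). A minimal userspace sketch of that embedding pattern (struct and field names here are illustrative, not btrfs's):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work {
		void (*func)(struct work *w);	/* like btrfs_work.func */
	};

	struct request {
		int id;
		struct work work;		/* embedded, as in end_io_wq */
	};

	static void handle(struct work *w)
	{
		/* recover the request that embeds this work item */
		struct request *req = container_of(w, struct request, work);
		printf("handling request %d\n", req->id);
	}

	int main(void)
	{
		struct request r = { .id = 42, .work = { .func = handle } };

		r.work.func(&r.work);	/* what a worker thread would do */
		return 0;
	}

run_one_async_submit() below is the in-tree instance of this: it turns the btrfs_work pointer back into its async_submit_bio.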
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
btrfs_header_generation(eb));
BUG_ON(ret);
- btrfs_clear_buffer_defrag(eb);
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
}
eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
- btrfs_clear_buffer_defrag(eb);
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
ret = -EIO;
end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
end = eb->start + end - 1;
- release_extent_buffer_tail_pages(eb);
err:
free_extent_buffer(eb);
out:
{
struct end_io_wq *end_io_wq = bio->bi_private;
struct btrfs_fs_info *fs_info;
- unsigned long flags;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
fs_info = end_io_wq->info;
- spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
end_io_wq->error = err;
- list_add_tail(&end_io_wq->list, &fs_info->end_io_work_list);
- spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags);
- queue_work(end_io_workqueue, &fs_info->end_io_work);
+ end_io_wq->work.func = end_workqueue_fn;
+ end_io_wq->work.flags = 0;
+ if (bio->bi_rw & (1 << BIO_RW))
+ btrfs_queue_worker(&fs_info->endio_write_workers,
+ &end_io_wq->work);
+ else
+ btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
+static void run_one_async_submit(struct btrfs_work *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct async_submit_bio *async;
+
+ async = container_of(work, struct async_submit_bio, work);
+ fs_info = BTRFS_I(async->inode)->root->fs_info;
+ atomic_dec(&fs_info->nr_async_submits);
+ async->submit_bio_hook(async->inode, async->rw, async->bio,
+ async->mirror_num);
+ kfree(async);
+}
+
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
int rw, struct bio *bio, int mirror_num,
extent_submit_bio_hook_t *submit_bio_hook)
async->bio = bio;
async->mirror_num = mirror_num;
async->submit_bio_hook = submit_bio_hook;
-
- spin_lock(&fs_info->async_submit_work_lock);
- list_add_tail(&async->list, &fs_info->async_submit_work_list);
+ async->work.func = run_one_async_submit;
+ async->work.flags = 0;
atomic_inc(&fs_info->nr_async_submits);
- spin_unlock(&fs_info->async_submit_work_lock);
-
- queue_work(async_submit_workqueue, &fs_info->async_submit_work);
+ btrfs_queue_worker(&fs_info->workers, &async->work);
return 0;
}
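btrfs_wq_submit_bio() raises nr_async_submits when a bio is queued and run_one_async_submit() lowers it again just before calling the hook, so the filesystem always knows how many async submissions are in flight (presumably for throttling elsewhere). A compressed userspace sketch of the inc-on-queue / dec-in-worker pairing, using pthreads and C11 atomics (all names illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	static atomic_int nr_async_submits;	/* stands in for the fs_info counter */

	struct async_req {
		int id;
	};

	static void *worker(void *arg)
	{
		struct async_req *req = arg;

		/* drop the in-flight count before the real submission,
		 * as run_one_async_submit() does */
		atomic_fetch_sub(&nr_async_submits, 1);
		printf("submitting request %d\n", req->id);
		free(req);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		struct async_req *req = malloc(sizeof(*req));

		req->id = 1;
		atomic_fetch_add(&nr_async_submits, 1);	/* queued */
		pthread_create(&t, NULL, worker, req);
		pthread_join(&t, NULL);
		return 0;
	}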
offset = bio->bi_sector << 9;
+ /*
+ * when we're called for a write, we're already in the async
+	 * submission context. Just jump into btrfs_map_bio
+ */
if (rw & (1 << BIO_RW)) {
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
+ return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+ mirror_num, 0);
}
+ /*
+ * called for a read, do the setup so that checksum validation
+ * can happen in the async kernel threads
+ */
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
BUG_ON(ret);
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
+ return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num)
{
+ /*
+ * kthread helpers are used to submit writes so that checksumming
+ * can happen in parallel across all CPUs
+ */
if (!(rw & (1 << BIO_RW))) {
return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
}
struct extent_map_tree *map;
int ret;
- if (page_count(page) > 3) {
-	/* once for page->private, once for the caller,
- * once for the page cache
- */
- return 0;
- }
tree = &BTRFS_I(page->mapping->host)->io_tree;
map = &BTRFS_I(page->mapping->host)->extent_tree;
+
ret = try_release_extent_state(map, tree, page, gfp_flags);
+ if (!ret) {
+ return 0;
+ }
+
+ ret = try_release_extent_buffer(tree, page);
if (ret == 1) {
- invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
}
+
return ret;
}
extent_invalidatepage(tree, page, offset);
btree_releasepage(page, GFP_NOFS);
if (PagePrivate(page)) {
- invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
+ printk("warning page private not zero on page %Lu\n",
+ page_offset(page));
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
{
struct inode *btree_inode = root->fs_info->btree_inode;
if (btrfs_header_generation(buf) ==
- root->fs_info->running_transaction->transid)
+ root->fs_info->running_transaction->transid) {
+ WARN_ON(!btrfs_tree_locked(buf));
clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
buf);
+ }
return 0;
}
root->in_sysfs = 0;
INIT_LIST_HEAD(&root->dirty_list);
+ spin_lock_init(&root->node_lock);
+ mutex_init(&root->objectid_mutex);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
memset(&root->root_kobj, 0, sizeof(root->root_kobj));
+ root->defrag_trans_start = fs_info->generation;
init_completion(&root->kobj_unregister);
root->defrag_running = 0;
root->defrag_level = 0;
spin_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
spin_unlock(&em_tree->lock);
- if (!em)
+ if (!em) {
+ __unplug_io_fn(bdi, page);
return;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ free_extent_map(em);
+ __unplug_io_fn(bdi, page);
+ return;
+ }
offset = offset - em->start;
btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
em->block_start + offset, page);
return ret;
}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-static void btrfs_end_io_csum(void *p)
-#else
-static void btrfs_end_io_csum(struct work_struct *work)
-#endif
+/*
+ * called by the kthread helper functions to finally call the bio end_io
+ * functions. This is where read checksum verification actually happens
+ */
+static void end_workqueue_fn(struct btrfs_work *work)
{
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- struct btrfs_fs_info *fs_info = p;
-#else
- struct btrfs_fs_info *fs_info = container_of(work,
- struct btrfs_fs_info,
- end_io_work);
-#endif
- unsigned long flags;
- struct end_io_wq *end_io_wq;
struct bio *bio;
- struct list_head *next;
+ struct end_io_wq *end_io_wq;
+ struct btrfs_fs_info *fs_info;
int error;
- int was_empty;
- while(1) {
- spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
- if (list_empty(&fs_info->end_io_work_list)) {
- spin_unlock_irqrestore(&fs_info->end_io_work_lock,
- flags);
- return;
- }
- next = fs_info->end_io_work_list.next;
- list_del(next);
- spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags);
-
- end_io_wq = list_entry(next, struct end_io_wq, list);
-
- bio = end_io_wq->bio;
- if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
- spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
- was_empty = list_empty(&fs_info->end_io_work_list);
- list_add_tail(&end_io_wq->list,
- &fs_info->end_io_work_list);
- spin_unlock_irqrestore(&fs_info->end_io_work_lock,
- flags);
- if (was_empty)
- return;
- continue;
- }
- error = end_io_wq->error;
- bio->bi_private = end_io_wq->private;
- bio->bi_end_io = end_io_wq->end_io;
- kfree(end_io_wq);
+ end_io_wq = container_of(work, struct end_io_wq, work);
+ bio = end_io_wq->bio;
+ fs_info = end_io_wq->info;
+
+ /* metadata bios are special because the whole tree block must
+ * be checksummed at once. This makes sure the entire block is in
+ * ram and up to date before trying to verify things. For
+ * blocksize <= pagesize, it is basically a noop
+ */
+ if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
+ btrfs_queue_worker(&fs_info->endio_workers,
+ &end_io_wq->work);
+ return;
+ }
+ error = end_io_wq->error;
+ bio->bi_private = end_io_wq->private;
+ bio->bi_end_io = end_io_wq->end_io;
+ kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- bio_endio(bio, bio->bi_size, error);
+ bio_endio(bio, bio->bi_size, error);
#else
- bio_endio(bio, error);
+ bio_endio(bio, error);
#endif
- }
}
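When a multi-page tree block is not fully in RAM yet, end_workqueue_fn() requeues the work item on its own queue instead of blocking the worker thread. A toy single-threaded FIFO showing that requeue-instead-of-block idea (all names illustrative):

	#include <stdio.h>

	struct work {
		const char *name;
		int ready_after;	/* passes until bio_ready_for_csum() would say yes */
	};

	int main(void)
	{
		struct work q[8] = {
			{ "metadata-bio", 2 },	/* needs two more passes */
			{ "data-bio",     0 },	/* ready immediately */
		};
		int head = 0, tail = 2;

		while (head != tail) {
			struct work w = q[head++ % 8];

			if (w.ready_after > 0) {
				w.ready_after--;
				q[tail++ % 8] = w;	/* requeue, don't block */
				printf("%s not ready, requeued\n", w.name);
				continue;
			}
			printf("%s complete\n", w.name);
		}
		return 0;
	}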
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-static void btrfs_async_submit_work(void *p)
-#else
-static void btrfs_async_submit_work(struct work_struct *work)
-#endif
+static int cleaner_kthread(void *arg)
{
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- struct btrfs_fs_info *fs_info = p;
-#else
- struct btrfs_fs_info *fs_info = container_of(work,
- struct btrfs_fs_info,
- async_submit_work);
-#endif
- struct async_submit_bio *async;
- struct list_head *next;
+ struct btrfs_root *root = arg;
- while(1) {
- spin_lock(&fs_info->async_submit_work_lock);
- if (list_empty(&fs_info->async_submit_work_list)) {
- spin_unlock(&fs_info->async_submit_work_lock);
- return;
+ do {
+ smp_mb();
+ if (root->fs_info->closing)
+ break;
+
+ vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+ mutex_lock(&root->fs_info->cleaner_mutex);
+ btrfs_clean_old_snapshots(root);
+ mutex_unlock(&root->fs_info->cleaner_mutex);
+
+ if (freezing(current)) {
+ refrigerator();
+ } else {
+ smp_mb();
+ if (root->fs_info->closing)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
}
- next = fs_info->async_submit_work_list.next;
- list_del(next);
- atomic_dec(&fs_info->nr_async_submits);
- spin_unlock(&fs_info->async_submit_work_lock);
-
- async = list_entry(next, struct async_submit_bio, list);
- async->submit_bio_hook(async->inode, async->rw, async->bio,
- async->mirror_num);
- kfree(async);
- }
+ } while (!kthread_should_stop());
+ return 0;
+}
+
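cleaner_kthread and transaction_kthread both follow the standard kthread idiom: loop until kthread_should_stop(), sleeping interruptibly between passes so kthread_stop() can wake them promptly. A bare-bones module sketch of just that loop, without the btrfs-specific freezer and mutex handling (module and function names are made up):

	#include <linux/module.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static struct task_struct *task;

	static int loop_fn(void *arg)
	{
		while (!kthread_should_stop()) {
			/* periodic work would go here */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ * 5);	/* kthread_stop() wakes us */
		}
		return 0;
	}

	static int __init demo_init(void)
	{
		task = kthread_run(loop_fn, NULL, "demo-loop");
		/* kthread_run() returns ERR_PTR() on failure, never NULL */
		return IS_ERR(task) ? PTR_ERR(task) : 0;
	}

	static void __exit demo_exit(void)
	{
		kthread_stop(task);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");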
+static int transaction_kthread(void *arg)
+{
+ struct btrfs_root *root = arg;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_transaction *cur;
+ unsigned long now;
+ unsigned long delay;
+ int ret;
+
+ do {
+ smp_mb();
+ if (root->fs_info->closing)
+ break;
+
+ delay = HZ * 30;
+ vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+ mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+ mutex_lock(&root->fs_info->trans_mutex);
+ cur = root->fs_info->running_transaction;
+ if (!cur) {
+ mutex_unlock(&root->fs_info->trans_mutex);
+ goto sleep;
+ }
+ now = get_seconds();
+ if (now < cur->start_time || now - cur->start_time < 30) {
+ mutex_unlock(&root->fs_info->trans_mutex);
+ delay = HZ * 5;
+ goto sleep;
+ }
+ mutex_unlock(&root->fs_info->trans_mutex);
+ trans = btrfs_start_transaction(root, 1);
+ ret = btrfs_commit_transaction(trans, root);
+sleep:
+ wake_up_process(root->fs_info->cleaner_kthread);
+ mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+
+ if (freezing(current)) {
+ refrigerator();
+ } else {
+ if (root->fs_info->closing)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay);
+ __set_current_state(TASK_RUNNING);
+ }
+ } while (!kthread_should_stop());
+ return 0;
}
struct btrfs_root *open_ctree(struct super_block *sb,
GFP_NOFS);
int ret;
int err = -EINVAL;
+
struct btrfs_super_block *disk_super;
if (!extent_root || !tree_root || !fs_info) {
err = -ENOMEM;
goto fail;
}
- end_io_workqueue = create_workqueue("btrfs-end-io");
- BUG_ON(!end_io_workqueue);
- async_submit_workqueue = create_workqueue("btrfs-async-submit");
-
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->hashers);
- INIT_LIST_HEAD(&fs_info->end_io_work_list);
- INIT_LIST_HEAD(&fs_info->async_submit_work_list);
spin_lock_init(&fs_info->hash_lock);
- spin_lock_init(&fs_info->end_io_work_lock);
- spin_lock_init(&fs_info->async_submit_work_lock);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->new_trans_lock);
INIT_LIST_HEAD(&fs_info->space_info);
btrfs_mapping_init(&fs_info->mapping_tree);
atomic_set(&fs_info->nr_async_submits, 0);
+ atomic_set(&fs_info->throttles, 0);
fs_info->sb = sb;
fs_info->max_extent = (u64)-1;
fs_info->max_inline = 8192 * 1024;
fs_info->btree_inode = new_inode(sb);
fs_info->btree_inode->i_ino = 1;
fs_info->btree_inode->i_nlink = 1;
+ fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->do_barriers = 1;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
- INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum, fs_info);
- INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work,
- fs_info);
- INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
-#else
- INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum);
- INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work);
- INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
-#endif
BTRFS_I(fs_info->btree_inode)->root = tree_root;
memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
sizeof(struct btrfs_key));
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
mutex_init(&fs_info->trans_mutex);
- mutex_init(&fs_info->fs_mutex);
+ mutex_init(&fs_info->drop_mutex);
+ mutex_init(&fs_info->alloc_mutex);
+ mutex_init(&fs_info->chunk_mutex);
+ mutex_init(&fs_info->transaction_kthread_mutex);
+ mutex_init(&fs_info->cleaner_mutex);
+ mutex_init(&fs_info->volume_mutex);
+ init_waitqueue_head(&fs_info->transaction_throttle);
+ init_waitqueue_head(&fs_info->transaction_wait);
#if 0
ret = add_hasher(fs_info, "crc32c");
if (err)
goto fail_sb_buffer;
+ /*
+ * we need to start all the end_io workers up front because the
+ * queue work function gets called at interrupt time, and so it
+ * cannot dynamically grow.
+ */
+ btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
+ btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
+ btrfs_init_workers(&fs_info->fixup_workers, 1);
+ btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+ btrfs_init_workers(&fs_info->endio_write_workers,
+ fs_info->thread_pool_size);
+ btrfs_start_workers(&fs_info->workers, 1);
+ btrfs_start_workers(&fs_info->submit_workers, 1);
+ btrfs_start_workers(&fs_info->fixup_workers, 1);
+ btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+ btrfs_start_workers(&fs_info->endio_write_workers,
+ fs_info->thread_pool_size);
+
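thread_pool_size, set earlier to min(num_online_cpus() + 2, 8), gives small machines a couple of spare threads for blocking work while keeping big machines from spawning dozens of mostly idle workers. A userspace equivalent of that computation:

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
		long pool = ncpus + 2 < 8 ? ncpus + 2 : 8;	/* min(ncpus + 2, 8) */

		printf("thread_pool_size = %ld\n", pool);
		return 0;
	}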
err = -EINVAL;
if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->open_devices);
goto fail_sb_buffer;
}
- mutex_lock(&fs_info->fs_mutex);
-
+ mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_sys_array(tree_root);
+ mutex_unlock(&fs_info->chunk_mutex);
if (ret) {
printk("btrfs: failed to read the system array on %s\n",
sb->s_id);
(unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
BTRFS_UUID_SIZE);
+ mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_chunk_tree(chunk_root);
+ mutex_unlock(&fs_info->chunk_mutex);
BUG_ON(ret);
btrfs_close_extra_devices(fs_devices);
fs_info->data_alloc_profile = (u64)-1;
fs_info->metadata_alloc_profile = (u64)-1;
fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+ fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
+ "btrfs-cleaner");
+	if (IS_ERR(fs_info->cleaner_kthread))
+ goto fail_extent_root;
+
+ fs_info->transaction_kthread = kthread_run(transaction_kthread,
+ tree_root,
+ "btrfs-transaction");
+	if (IS_ERR(fs_info->transaction_kthread))
+ goto fail_cleaner;
+
- mutex_unlock(&fs_info->fs_mutex);
return tree_root;
+fail_cleaner:
+ kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
free_extent_buffer(extent_root->node);
fail_tree_root:
free_extent_buffer(tree_root->node);
fail_sys_array:
- mutex_unlock(&fs_info->fs_mutex);
fail_sb_buffer:
- extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
+ btrfs_stop_workers(&fs_info->fixup_workers);
+ btrfs_stop_workers(&fs_info->workers);
+ btrfs_stop_workers(&fs_info->endio_workers);
+ btrfs_stop_workers(&fs_info->endio_write_workers);
+ btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
iput(fs_info->btree_inode);
fail:
struct btrfs_fs_info *fs_info = root->fs_info;
fs_info->closing = 1;
- btrfs_transaction_flush_work(root);
- mutex_lock(&fs_info->fs_mutex);
- btrfs_defrag_dirty_roots(root->fs_info);
+ smp_mb();
+
+ kthread_stop(root->fs_info->transaction_kthread);
+ kthread_stop(root->fs_info->cleaner_kthread);
+
+ btrfs_clean_old_snapshots(root);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
/* run commit again to drop the original snapshot */
BUG_ON(ret);
write_ctree_super(NULL, root);
- mutex_unlock(&fs_info->fs_mutex);
-
- btrfs_transaction_flush_work(root);
if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
- extent_io_tree_empty_lru(&fs_info->free_space_cache);
- extent_io_tree_empty_lru(&fs_info->block_group_cache);
- extent_io_tree_empty_lru(&fs_info->pinned_extents);
- extent_io_tree_empty_lru(&fs_info->pending_del);
- extent_io_tree_empty_lru(&fs_info->extent_ins);
- extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
-
- flush_workqueue(async_submit_workqueue);
- flush_workqueue(end_io_workqueue);
-
truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
- flush_workqueue(async_submit_workqueue);
- destroy_workqueue(async_submit_workqueue);
-
- flush_workqueue(end_io_workqueue);
- destroy_workqueue(end_io_workqueue);
+ btrfs_stop_workers(&fs_info->fixup_workers);
+ btrfs_stop_workers(&fs_info->workers);
+ btrfs_stop_workers(&fs_info->endio_workers);
+ btrfs_stop_workers(&fs_info->endio_write_workers);
+ btrfs_stop_workers(&fs_info->submit_workers);
iput(fs_info->btree_inode);
#if 0
u64 transid = btrfs_header_generation(buf);
struct inode *btree_inode = root->fs_info->btree_inode;
+ WARN_ON(!btrfs_tree_locked(buf));
if (transid != root->fs_info->generation) {
printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
(unsigned long long)buf->start,
set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}
-void btrfs_throttle(struct btrfs_root *root)
-{
- struct backing_dev_info *bdi;
-
- bdi = &root->fs_info->bdi;
- if (root->fs_info->throttles && bdi_write_congested(bdi)) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
- congestion_wait(WRITE, HZ/20);
-#else
- blk_congestion_wait(WRITE, HZ/20);
-#endif
- }
-}
-
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
/*
return;
}
-void btrfs_set_buffer_defrag(struct extent_buffer *buf)
-{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- struct inode *btree_inode = root->fs_info->btree_inode;
- set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
- buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
-}
-
-void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
-{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- struct inode *btree_inode = root->fs_info->btree_inode;
- set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
- buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
- GFP_NOFS);
-}
-
-int btrfs_buffer_defrag(struct extent_buffer *buf)
-{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- struct inode *btree_inode = root->fs_info->btree_inode;
- return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
- buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
-}
-
-int btrfs_buffer_defrag_done(struct extent_buffer *buf)
-{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- struct inode *btree_inode = root->fs_info->btree_inode;
- return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
- buf->start, buf->start + buf->len - 1,
- EXTENT_DEFRAG_DONE, 0);
-}
-
-int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
-{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- struct inode *btree_inode = root->fs_info->btree_inode;
- return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
- buf->start, buf->start + buf->len - 1,
- EXTENT_DEFRAG_DONE, GFP_NOFS);
-}
-
-int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
-{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- struct inode *btree_inode = root->fs_info->btree_inode;
- return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
- buf->start, buf->start + buf->len - 1,
- EXTENT_DEFRAG, GFP_NOFS);
-}
-
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;