#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
+#include <linux/buffer_head.h> // for block_sync_page
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
+#include "print-tree.h"
-u64 bh_blocknr(struct buffer_head *bh)
+#if 0
+/*
+ * Sanity check that the block number stored in @buf's header matches the
+ * location the buffer sits at.  Compiled out for now (#if 0).
+ */
+static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
-	return bh->b_blocknr;
-}
-
-static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
-{
-	struct btrfs_node *node = btrfs_buffer_node(buf);
-	if (bh_blocknr(buf) != btrfs_header_blocknr(&node->header)) {
-		printk(KERN_CRIT "bh_blocknr(buf) is %llu, header is %llu\n",
-		       (unsigned long long)bh_blocknr(buf),
-		       (unsigned long long)btrfs_header_blocknr(&node->header));
+	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
+		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
+		       (unsigned long long)extent_buffer_blocknr(buf),
+		       (unsigned long long)btrfs_header_blocknr(buf));
		return 1;
	}
	return 0;
}
+#endif
-struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
-{
- struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
- int blockbits = root->fs_info->sb->s_blocksize_bits;
- unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
- struct page *page;
- struct buffer_head *bh;
- struct buffer_head *head;
- struct buffer_head *ret = NULL;
+static struct extent_map_ops btree_extent_map_ops;
-
- page = find_lock_page(mapping, index);
- if (!page)
- return NULL;
-
- if (!page_has_buffers(page))
- goto out_unlock;
-
- head = page_buffers(page);
- bh = head;
- do {
- if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
- ret = bh;
- get_bh(bh);
- goto out_unlock;
- }
- bh = bh->b_this_page;
- } while (bh != head);
-out_unlock:
- unlock_page(page);
- page_cache_release(page);
- return ret;
+/*
+ * Look up the cached extent_buffer for the tree block at @bytenr/@blocksize
+ * in the btree inode's extent tree.  Presumably returns NULL when the
+ * buffer is not already present (find_*, in contrast to alloc_* below).
+ */
+struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
+					    u64 bytenr, u32 blocksize)
+{
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_buffer *eb;
+	eb = find_extent_buffer(&BTRFS_I(btree_inode)->extent_tree,
+				bytenr, blocksize, GFP_NOFS);
+	return eb;
}
-int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
-			    u64 logical)
+/*
+ * Return an extent_buffer for the tree block at @bytenr/@blocksize,
+ * allocating one (and its pages in the btree inode) if needed.  No disk
+ * read is issued here; callers such as read_tree_block() do that.
+ */
+struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
+						   u64 bytenr, u32 blocksize)
{
-	if (logical == 0) {
-		bh->b_bdev = NULL;
-		bh->b_blocknr = 0;
-		set_buffer_mapped(bh);
-	} else {
-		map_bh(bh, root->fs_info->sb, logical);
-	}
-	return 0;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_buffer *eb;
+
+	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->extent_tree,
+				 bytenr, blocksize, NULL, GFP_NOFS);
+	return eb;
}
-struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
-						 u64 blocknr)
+/*
+ * get_extent callback for the btree inode.  File offsets map 1:1 onto
+ * disk bytes, so a single identity extent_map (start 0, block_start 0,
+ * end = i_size rounded down to a page, inclusive) is inserted on first
+ * use and returned for every range thereafter.
+ */
+struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
+				    size_t page_offset, u64 start, u64 end,
+				    int create)
{
-	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
-	int blockbits = root->fs_info->sb->s_blocksize_bits;
-	unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
-	struct page *page;
-	struct buffer_head *bh;
-	struct buffer_head *head;
-	struct buffer_head *ret = NULL;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map *em;
+	int ret;
-
-	page = find_lock_page(mapping, index);
-	if (!page)
-		return NULL;
-
-	if (!page_has_buffers(page))
-		goto out_unlock;
-
-	head = page_buffers(page);
-	bh = head;
-	do {
-		if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
-			ret = bh;
-			get_bh(bh);
-			goto out_unlock;
-		}
-		bh = bh->b_this_page;
-	} while (bh != head);
-out_unlock:
-	unlock_page(page);
-	page_cache_release(page);
-	return ret;
+again:
+	em = lookup_extent_mapping(em_tree, start, end);
+	if (em) {
+		goto out;
+	}
+	em = alloc_extent_map(GFP_NOFS);
+	if (!em) {
+		em = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	em->start = 0;
+	em->end = (i_size_read(inode) & ~((u64)PAGE_CACHE_SIZE -1)) - 1;
+	em->block_start = 0;
+	em->block_end = em->end;
+	em->bdev = inode->i_sb->s_bdev;
+	ret = add_extent_mapping(em_tree, em);
+	if (ret == -EEXIST) {
+		/* lost a race with a concurrent inserter: retry the lookup */
+		free_extent_map(em);
+		em = NULL;
+		goto again;
+	} else if (ret) {
+		em = ERR_PTR(ret);
+	}
+out:
+	return em;
}
-static int btree_get_block(struct inode *inode, sector_t iblock,
-			   struct buffer_head *bh, int create)
+/*
+ * Fold @len bytes of @data into the running crc32c @seed and return the
+ * new value.  @root is unused here but kept in the signature.
+ */
+u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
-	int err;
-	struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;
-	err = btrfs_map_bh_to_logical(root, bh, iblock);
-	return err;
+	return crc32c(seed, data, len);
}
-int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
-		    char *result)
+/*
+ * Finish a crc32c: invert and store little-endian into @result.
+ * NOTE(review): writes through a __le32 cast — assumes @result is
+ * suitably aligned; confirm at call sites.
+ */
+void btrfs_csum_final(u32 crc, char *result)
{
-	u32 crc;
-	crc = crc32c(0, data, len);
-	memcpy(result, &crc, BTRFS_CRC32_SIZE);
-	return 0;
+	*(__le32 *)result = ~cpu_to_le32(crc);
}
-static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
+/*
+ * Checksum the contents of @buf past the csum field, walking the buffer
+ * one kmapped chunk at a time.  @verify == 1 compares against the csum
+ * stored at offset 0 (returns 1 on mismatch); @verify == 0 writes the
+ * freshly computed csum into the buffer.  Returns 0 on success.
+ */
+static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
-	int ret;
-	struct btrfs_node *node;
+	unsigned long len;
+	unsigned long cur_len;
+	unsigned long offset = BTRFS_CSUM_SIZE;
+	char *map_token = NULL;
+	char *kaddr;
+	unsigned long map_start;
+	unsigned long map_len;
+	int err;
+	u32 crc = ~(u32)0;
+
+	len = buf->len - offset;
+	while(len > 0) {
+		/* ask for at least 32 mapped bytes; cur_len below clamps to
+		 * what the mapping actually covers */
+		err = map_private_extent_buffer(buf, offset, 32,
+					&map_token, &kaddr,
+					&map_start, &map_len, KM_USER0);
+		if (err) {
+			printk("failed to map extent buffer! %lu\n",
+			       offset);
+			return 1;
+		}
+		cur_len = min(len, map_len - (offset - map_start));
+		crc = btrfs_csum_data(root, kaddr + offset - map_start,
+				      crc, cur_len);
+		len -= cur_len;
+		offset += cur_len;
+		unmap_extent_buffer(buf, map_token, KM_USER0);
+	}
+	btrfs_csum_final(crc, result);
-	ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
-			      bh->b_size - BTRFS_CSUM_SIZE, result);
-	if (ret)
-		return ret;
	if (verify) {
-		if (memcmp(bh->b_data, result, BTRFS_CRC32_SIZE)) {
+		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
			printk("btrfs: %s checksum verify failed on %llu\n",
			       root->fs_info->sb->s_id,
-			       (unsigned long long)bh_blocknr(bh));
+			       buf->start);
			return 1;
		}
	} else {
-		node = btrfs_buffer_node(bh);
-		memcpy(node->header.csum, result, BTRFS_CRC32_SIZE);
+		write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
+	}
+	return 0;
+}
+
+
+/*
+ * Recompute and store the checksum of the tree block whose first page is
+ * @page, just before writeback.  Pages without a usable ->private (or
+ * with the EXTENT_PAGE_PRIVATE marker) are skipped.  Always returns 0.
+ */
+int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
+{
+	struct extent_map_tree *tree;
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 found_start;
+	int found_level;
+	unsigned long len;
+	struct extent_buffer *eb;
+	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+	if (!page->private)
+		goto out;
+	/* buffer length is stashed in page->private, shifted by 2 */
+	len = page->private >> 2;
+	if (len == 0) {
+		/* NOTE(review): only warns — execution continues with
+		 * len == 0; confirm this cannot happen in practice */
+		WARN_ON(1);
	}
+	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	/* presumably reads any remaining pages of a multi-page block; the
+	 * page being written is already uptodate — TODO confirm */
+	read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1);
+	found_start = btrfs_header_bytenr(eb);
+	if (found_start != start) {
+		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
+		       start, found_start, len);
+	}
+	found_level = btrfs_header_level(eb);
+	csum_tree_block(root, eb, 0);
+	free_extent_buffer(eb);
+out:
+	return 0;
+}
+
+/* writepage_io_hook: checksum btree pages as they go to disk */
+static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
+{
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+
+	csum_dirty_buffer(root, page);
+	return 0;
+}
+/* ->writepage for the btree inode, routed through the extent_map code */
+static int btree_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct extent_map_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	return extent_write_full_page(tree, page, btree_get_extent, wbc);
+}
+
+/*
+ * ->writepages for the btree inode.  For background (pdflush, non-sync)
+ * writeback, do nothing until at least ~96MB of metadata is dirty, to
+ * batch btree writes.
+ */
+static int btree_writepages(struct address_space *mapping,
+			    struct writeback_control *wbc)
+{
+	struct extent_map_tree *tree;
+	tree = &BTRFS_I(mapping->host)->extent_tree;
+	if (wbc->sync_mode == WB_SYNC_NONE && current_is_pdflush()) {
+		u64 num_dirty;
+		u64 start = 0;
+		unsigned long thresh = 96 * 1024 * 1024;
+		num_dirty = count_range_bits(tree, &start, thresh, EXTENT_DIRTY);
+		if (num_dirty < thresh) {
+			return 0;
+		}
+	}
+	return extent_writepages(tree, mapping, btree_get_extent, wbc);
+}
+
+/* ->readpage for the btree inode */
+int btree_readpage(struct file *file, struct page *page)
+{
+	struct extent_map_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	return extent_read_full_page(tree, page, btree_get_extent);
+}
+
+/*
+ * ->releasepage: let the extent_map code decide whether the page can be
+ * dropped; on success (ret == 1) detach the page's private state and drop
+ * the reference it held.
+ */
+static int btree_releasepage(struct page *page, gfp_t unused_gfp_flags)
+{
+	struct extent_map_tree *tree;
+	int ret;
+
+	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	ret = try_release_extent_mapping(tree, page);
+	if (ret == 1) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		page_cache_release(page);
+	}
+	return ret;
+}
+
+/* ->invalidatepage: tear down extent state, then try to release the page */
+static void btree_invalidatepage(struct page *page, unsigned long offset)
+{
+	struct extent_map_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	extent_invalidatepage(tree, page, offset);
+	btree_releasepage(page, GFP_NOFS);
+}
+
+#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
struct buffer_head *bh;
} while (bh != head);
return block_write_full_page(page, btree_get_block, wbc);
}
-
-static int btree_readpage(struct file * file, struct page * page)
-{
- return block_read_full_page(page, btree_get_block);
-}
+#endif
+/* address_space ops for the btree inode; all routed through extent_map */
static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
+	.writepages	= btree_writepages,
+	.releasepage	= btree_releasepage,
+	.invalidatepage = btree_invalidatepage,
	.sync_page	= block_sync_page,
};
-int readahead_tree_block(struct btrfs_root *root, u64 blocknr)
+/*
+ * Kick off a (non-waiting) read of the tree block at @bytenr and drop the
+ * reference.  NOTE(review): ret is never changed from 0 in the new code,
+ * so the function always returns 0 — confirm callers don't rely on the
+ * old non-zero returns.
+ */
+int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
{
-	struct buffer_head *bh = NULL;
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;
-	bh = btrfs_find_create_tree_block(root, blocknr);
-	if (!bh)
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
		return 0;
-	if (buffer_uptodate(bh)) {
-		ret = 1;
-		goto done;
-	}
-	if (test_set_buffer_locked(bh)) {
-		ret = 1;
-		goto done;
-	}
-	if (!buffer_uptodate(bh)) {
-		get_bh(bh);
-		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ, bh);
-	} else {
-		unlock_buffer(bh);
-		ret = 1;
-	}
-done:
-	brelse(bh);
+	read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
+				 buf, 0, 0);
+	free_extent_buffer(buf);
	return ret;
}
-struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
+/*
+ * Read the tree block at @bytenr/@blocksize, waiting for I/O, and verify
+ * its checksum once.  EXTENT_CSUM (in buf->flags and in the extent tree
+ * bits) caches "already verified" so repeat reads skip the csum work.
+ */
+struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+				      u32 blocksize)
{
-	struct buffer_head *bh = NULL;
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_map_tree *extent_tree;
+	int ret;
-	bh = btrfs_find_create_tree_block(root, blocknr);
-	if (!bh)
-		return bh;
-	if (buffer_uptodate(bh))
-		goto uptodate;
-	lock_buffer(bh);
-	if (!buffer_uptodate(bh)) {
-		get_bh(bh);
-		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ, bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh))
-			goto fail;
-	} else {
-		unlock_buffer(bh);
+	extent_tree = &BTRFS_I(btree_inode)->extent_tree;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return NULL;
+	read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
+				 buf, 0, 1);
+	if (buf->flags & EXTENT_CSUM) {
+		return buf;
	}
-uptodate:
-	if (!buffer_checked(bh)) {
-		csum_tree_block(root, bh, 1);
-		set_buffer_checked(bh);
+	if (test_range_bit(extent_tree, buf->start, buf->start + buf->len - 1,
+			   EXTENT_CSUM, 1)) {
+		buf->flags |= EXTENT_CSUM;
+		return buf;
	}
-	if (check_tree_block(root, bh))
-		goto fail;
-	return bh;
-fail:
-	brelse(bh);
-	return NULL;
+	/* NOTE(review): ret is ignored — a csum mismatch only prints a
+	 * warning inside csum_tree_block and the buffer is still returned
+	 * and marked EXTENT_CSUM; confirm this is intended */
+	ret = csum_tree_block(root, buf, 1);
+	set_extent_bits(extent_tree, buf->start,
+			buf->start + buf->len - 1,
+			EXTENT_CSUM, GFP_NOFS);
+	buf->flags |= EXTENT_CSUM;
+	return buf;
}
+/*
+ * Clear the dirty state of @buf in the btree inode's extent tree.
+ * @trans is unused here — presumably kept for interface symmetry.
+ */
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		     struct buffer_head *buf)
+		     struct extent_buffer *buf)
{
-	WARN_ON(atomic_read(&buf->b_count) == 0);
-	clear_buffer_dirty(buf);
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, buf);
	return 0;
}
-static int __setup_root(int blocksize,
+/* Block until any in-flight writeback of @buf completes. */
+int wait_on_tree_block_writeback(struct btrfs_root *root,
+				 struct extent_buffer *buf)
+{
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->extent_tree,
+					buf);
+	return 0;
+}
+
+static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 objectid)
root->node = NULL;
root->inode = NULL;
root->commit_root = NULL;
- root->blocksize = blocksize;
+ root->sectorsize = sectorsize;
+ root->nodesize = nodesize;
+ root->leafsize = leafsize;
root->ref_cows = 0;
root->fs_info = fs_info;
root->objectid = objectid;
root->last_trans = 0;
root->highest_inode = 0;
root->last_inode_alloc = 0;
+ root->name = NULL;
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
+ memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
+ memset(&root->root_kobj, 0, sizeof(root->root_kobj));
+ init_completion(&root->kobj_unregister);
+ init_rwsem(&root->snap_sem);
+ root->defrag_running = 0;
+ root->defrag_level = 0;
root->root_key.objectid = objectid;
return 0;
}
-static int find_and_setup_root(int blocksize,
-			       struct btrfs_root *tree_root,
+/*
+ * Initialize @root for @objectid (block sizes copied from @tree_root),
+ * locate its last committed root item, and read its root node.
+ */
+static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
+	u32 blocksize;
-	__setup_root(blocksize, root, fs_info, objectid);
+	__setup_root(tree_root->nodesize, tree_root->leafsize,
+		     tree_root->sectorsize, root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);
-	root->node = read_tree_block(root,
-				     btrfs_root_blocknr(&root->root_item));
+	/* node size depends on the level of the root item's root block */
+	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
+	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+				     blocksize);
	BUG_ON(!root->node);
	return 0;
}
struct btrfs_root *root;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_path *path;
- struct btrfs_leaf *l;
+ struct extent_buffer *l;
u64 highest_inode;
+ u32 blocksize;
int ret = 0;
root = kzalloc(sizeof(*root), GFP_NOFS);
if (!root)
return ERR_PTR(-ENOMEM);
if (location->offset == (u64)-1) {
- ret = find_and_setup_root(fs_info->sb->s_blocksize,
- fs_info->tree_root, fs_info,
+ ret = find_and_setup_root(tree_root, fs_info,
location->objectid, root);
if (ret) {
kfree(root);
goto insert;
}
- __setup_root(fs_info->sb->s_blocksize, root, fs_info,
+ __setup_root(tree_root->nodesize, tree_root->leafsize,
+ tree_root->sectorsize, root, fs_info,
location->objectid);
path = btrfs_alloc_path();
ret = -ENOENT;
goto out;
}
- l = btrfs_buffer_leaf(path->nodes[0]);
- memcpy(&root->root_item,
- btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
+ l = path->nodes[0];
+ read_extent_buffer(l, &root->root_item,
+ btrfs_item_ptr_offset(l, path->slots[0]),
sizeof(root->root_item));
memcpy(&root->root_key, location, sizeof(*location));
ret = 0;
kfree(root);
return ERR_PTR(ret);
}
- root->node = read_tree_block(root,
- btrfs_root_blocknr(&root->root_item));
+ blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
+ root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+ blocksize);
BUG_ON(!root->node);
insert:
root->ref_cows = 1;
}
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *location)
+ struct btrfs_key *location,
+ const char *name, int namelen)
{
struct btrfs_root *root;
int ret;
(unsigned long)root->root_key.objectid,
root);
if (ret) {
- brelse(root->node);
+ free_extent_buffer(root->node);
+ kfree(root);
+ return ERR_PTR(ret);
+ }
+
+ ret = btrfs_set_root_name(root, name, namelen);
+ if (ret) {
+ free_extent_buffer(root->node);
+ kfree(root);
+ return ERR_PTR(ret);
+ }
+
+ ret = btrfs_sysfs_add_root(root);
+ if (ret) {
+ free_extent_buffer(root->node);
+ kfree(root->name);
kfree(root);
return ERR_PTR(ret);
}
+
+ ret = btrfs_find_dead_roots(fs_info->tree_root,
+ root->root_key.objectid, root);
+ BUG_ON(ret);
+
return root;
}
-
+#if 0
+/*
+ * Disabled: allocate a crypto hash tfm of the given @type and add it to
+ * fs_info->hashers under hash_lock.
+ */
+static int add_hasher(struct btrfs_fs_info *info, char *type) {
+	struct btrfs_hasher *hasher;
+
+	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
+	if (!hasher)
+		return -ENOMEM;
+	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
+	if (!hasher->hash_tfm) {
+		kfree(hasher);
+		return -EINVAL;
+	}
+	spin_lock(&info->hash_lock);
+	list_add(&hasher->list, &info->hashers);
+	spin_unlock(&info->hash_lock);
+	return 0;
+}
+#endif
struct btrfs_root *open_ctree(struct super_block *sb)
{
+ u32 sectorsize;
+ u32 nodesize;
+ u32 leafsize;
+ u32 blocksize;
struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
err = -ENOMEM;
goto fail;
}
- init_bit_radix(&fs_info->pinned_radix);
- init_bit_radix(&fs_info->pending_del_radix);
- init_bit_radix(&fs_info->extent_map_radix);
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
- INIT_RADIX_TREE(&fs_info->block_group_radix, GFP_KERNEL);
- INIT_RADIX_TREE(&fs_info->block_group_data_radix, GFP_KERNEL);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
+ INIT_LIST_HEAD(&fs_info->hashers);
+ spin_lock_init(&fs_info->hash_lock);
+
+ memset(&fs_info->super_kobj, 0, sizeof(fs_info->super_kobj));
+ init_completion(&fs_info->kobj_unregister);
sb_set_blocksize(sb, 4096);
fs_info->running_transaction = NULL;
+ fs_info->last_trans_committed = 0;
fs_info->tree_root = tree_root;
fs_info->extent_root = extent_root;
fs_info->sb = sb;
fs_info->btree_inode->i_nlink = 1;
fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+ extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
+ fs_info->btree_inode->i_mapping,
+ GFP_NOFS);
+ BTRFS_I(fs_info->btree_inode)->extent_tree.ops = &btree_extent_map_ops;
+
+ extent_map_tree_init(&fs_info->free_space_cache,
+ fs_info->btree_inode->i_mapping, GFP_NOFS);
+ extent_map_tree_init(&fs_info->block_group_cache,
+ fs_info->btree_inode->i_mapping, GFP_NOFS);
+ extent_map_tree_init(&fs_info->pinned_extents,
+ fs_info->btree_inode->i_mapping, GFP_NOFS);
+ extent_map_tree_init(&fs_info->pending_del,
+ fs_info->btree_inode->i_mapping, GFP_NOFS);
+ extent_map_tree_init(&fs_info->extent_ins,
+ fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->do_barriers = 1;
- fs_info->extent_tree_insert_nr = 0;
- fs_info->extent_tree_prealloc_nr = 0;
fs_info->closing = 0;
+ fs_info->total_pinned = 0;
INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
BTRFS_I(fs_info->btree_inode)->root = tree_root;
mutex_init(&fs_info->trans_mutex);
mutex_init(&fs_info->fs_mutex);
- __setup_root(sb->s_blocksize, tree_root,
+#if 0
+ ret = add_hasher(fs_info, "crc32c");
+ if (ret) {
+ printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
+ err = -ENOMEM;
+ goto fail_iput;
+ }
+#endif
+ __setup_root(512, 512, 512, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
fs_info->sb_buffer = read_tree_block(tree_root,
- BTRFS_SUPER_INFO_OFFSET /
- sb->s_blocksize);
+ BTRFS_SUPER_INFO_OFFSET,
+ 512);
if (!fs_info->sb_buffer)
goto fail_iput;
- disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
- fs_info->disk_super = disk_super;
- memcpy(&fs_info->super_copy, disk_super, sizeof(fs_info->super_copy));
+ read_extent_buffer(fs_info->sb_buffer, &fs_info->super_copy, 0,
+ sizeof(fs_info->super_copy));
+
+ read_extent_buffer(fs_info->sb_buffer, fs_info->fsid,
+ (unsigned long)btrfs_super_fsid(fs_info->sb_buffer),
+ BTRFS_FSID_SIZE);
+ disk_super = &fs_info->super_copy;
if (!btrfs_super_root(disk_super))
goto fail_sb_buffer;
- i_size_write(fs_info->btree_inode,
- btrfs_super_total_blocks(disk_super) <<
- fs_info->btree_inode->i_blkbits);
+ nodesize = btrfs_super_nodesize(disk_super);
+ leafsize = btrfs_super_leafsize(disk_super);
+ sectorsize = btrfs_super_sectorsize(disk_super);
+ tree_root->nodesize = nodesize;
+ tree_root->leafsize = leafsize;
+ tree_root->sectorsize = sectorsize;
+ sb_set_blocksize(sb, sectorsize);
+ i_size_write(fs_info->btree_inode,
+ btrfs_super_total_bytes(disk_super));
if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
sizeof(disk_super->magic))) {
printk("btrfs: valid FS not found on %s\n", sb->s_id);
goto fail_sb_buffer;
}
+
+ blocksize = btrfs_level_size(tree_root,
+ btrfs_super_root_level(disk_super));
+
tree_root->node = read_tree_block(tree_root,
- btrfs_super_root(disk_super));
+ btrfs_super_root(disk_super),
+ blocksize);
if (!tree_root->node)
goto fail_sb_buffer;
mutex_lock(&fs_info->fs_mutex);
- ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
+
+ ret = find_and_setup_root(tree_root, fs_info,
BTRFS_EXTENT_TREE_OBJECTID, extent_root);
if (ret) {
mutex_unlock(&fs_info->fs_mutex);
btrfs_read_block_groups(extent_root);
fs_info->generation = btrfs_super_generation(disk_super) + 1;
- ret = btrfs_find_dead_roots(tree_root);
- if (ret)
- goto fail_tree_root;
mutex_unlock(&fs_info->fs_mutex);
return tree_root;
fail_tree_root:
- btrfs_block_release(tree_root, tree_root->node);
+ free_extent_buffer(tree_root->node);
fail_sb_buffer:
- btrfs_block_release(tree_root, fs_info->sb_buffer);
+ free_extent_buffer(fs_info->sb_buffer);
fail_iput:
iput(fs_info->btree_inode);
fail:
*root)
{
int ret;
- struct buffer_head *bh = root->fs_info->sb_buffer;
-
- lock_buffer(bh);
- WARN_ON(atomic_read(&bh->b_count) < 1);
- clear_buffer_dirty(bh);
- csum_tree_block(root, bh, 0);
- bh->b_end_io = end_buffer_write_sync;
- get_bh(bh);
- if (root->fs_info->do_barriers)
- ret = submit_bh(WRITE_BARRIER, bh);
- else
- ret = submit_bh(WRITE, bh);
- if (ret == -EOPNOTSUPP) {
- get_bh(bh);
- lock_buffer(bh);
- set_buffer_uptodate(bh);
- root->fs_info->do_barriers = 0;
- ret = submit_bh(WRITE, bh);
- }
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- WARN_ON(1);
- return -EIO;
- }
- return 0;
+ struct extent_buffer *super = root->fs_info->sb_buffer;
+ struct inode *btree_inode = root->fs_info->btree_inode;
+
+ set_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, super);
+ ret = sync_page_range_nolock(btree_inode, btree_inode->i_mapping,
+ super->start, super->len);
+ return ret;
}
+/*
+ * Drop @root from the fs_roots radix tree, unregister it from sysfs, and
+ * release its inode, tree buffers, name, and the struct itself.
+ */
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
+	btrfs_sysfs_del_root(root);
	if (root->inode)
		iput(root->inode);
	if (root->node)
-		brelse(root->node);
+		free_extent_buffer(root->node);
	if (root->commit_root)
-		brelse(root->commit_root);
+		free_extent_buffer(root->commit_root);
+	if (root->name)
+		kfree(root->name);
	kfree(root);
	return 0;
}
fs_info->closing = 1;
btrfs_transaction_flush_work(root);
mutex_lock(&fs_info->fs_mutex);
+ btrfs_defrag_dirty_roots(root->fs_info);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
/* run commit again to drop the original snapshot */
mutex_unlock(&fs_info->fs_mutex);
if (fs_info->extent_root->node)
- btrfs_block_release(fs_info->extent_root,
- fs_info->extent_root->node);
+ free_extent_buffer(fs_info->extent_root->node);
+
if (fs_info->tree_root->node)
- btrfs_block_release(fs_info->tree_root,
- fs_info->tree_root->node);
- btrfs_block_release(root, fs_info->sb_buffer);
- truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
- iput(fs_info->btree_inode);
+ free_extent_buffer(fs_info->tree_root->node);
+
+ free_extent_buffer(fs_info->sb_buffer);
btrfs_free_block_groups(root->fs_info);
del_fs_roots(fs_info);
+ extent_map_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+ truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
+ iput(fs_info->btree_inode);
+#if 0
+ while(!list_empty(&fs_info->hashers)) {
+ struct btrfs_hasher *hasher;
+ hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
+ hashers);
+ list_del(&hasher->hashers);
+ crypto_free_hash(&fs_info->hash_tfm);
+ kfree(hasher);
+ }
+#endif
kfree(fs_info->extent_root);
kfree(fs_info->tree_root);
return 0;
}
-void btrfs_mark_buffer_dirty(struct buffer_head *bh)
+/* Return non-zero if @buf's pages are all uptodate. */
+int btrfs_buffer_uptodate(struct extent_buffer *buf)
+{
+	struct inode *btree_inode = buf->first_page->mapping->host;
+	return extent_buffer_uptodate(&BTRFS_I(btree_inode)->extent_tree, buf);
+}
+
+/* Mark all of @buf's pages uptodate. */
+int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
+{
+	struct inode *btree_inode = buf->first_page->mapping->host;
+	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->extent_tree,
+					  buf);
+}
+
+/*
+ * Mark @buf dirty for writeback.  Warns loudly if the buffer's header
+ * generation does not match the current running transaction — dirtying a
+ * block from an older transid would corrupt COW semantics.
+ */
+void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
-	struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;
-	u64 transid = btrfs_header_generation(btrfs_buffer_header(bh));
-	WARN_ON(!atomic_read(&bh->b_count));
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	u64 transid = btrfs_header_generation(buf);
+	struct inode *btree_inode = root->fs_info->btree_inode;
+
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
-			(unsigned long long)bh->b_blocknr,
+			(unsigned long long)buf->start,
			transid, root->fs_info->generation);
		WARN_ON(1);
	}
-	mark_buffer_dirty(bh);
+	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, buf);
+}
+
+/*
+ * Throttle the caller against dirty btree pages.
+ * NOTE(review): the @nr argument is ignored and a literal 1 is passed to
+ * balance_dirty_pages_ratelimited_nr — confirm this is intentional.
+ */
+void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+	balance_dirty_pages_ratelimited_nr(
+			root->fs_info->btree_inode->i_mapping, 1);
}
-void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
+/* Tag @buf's byte range with EXTENT_DEFRAG in the btree extent tree. */
+void btrfs_set_buffer_defrag(struct extent_buffer *buf)
{
-	brelse(buf);
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	set_extent_bits(&BTRFS_I(btree_inode)->extent_tree, buf->start,
+			buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
}
-void btrfs_btree_balance_dirty(struct btrfs_root *root)
+/* Tag @buf's byte range with EXTENT_DEFRAG_DONE. */
+void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
{
-	balance_dirty_pages_ratelimited(root->fs_info->btree_inode->i_mapping);
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	set_extent_bits(&BTRFS_I(btree_inode)->extent_tree, buf->start,
+			buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
+			GFP_NOFS);
}
+
+/* Test whether any part of @buf's range carries EXTENT_DEFRAG. */
+int btrfs_buffer_defrag(struct extent_buffer *buf)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	return test_range_bit(&BTRFS_I(btree_inode)->extent_tree,
+		     buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
+}
+
+/* Test whether any part of @buf's range carries EXTENT_DEFRAG_DONE. */
+int btrfs_buffer_defrag_done(struct extent_buffer *buf)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	return test_range_bit(&BTRFS_I(btree_inode)->extent_tree,
+			buf->start, buf->start + buf->len - 1,
+			EXTENT_DEFRAG_DONE, 0);
+}
+
+/* Clear EXTENT_DEFRAG_DONE over @buf's byte range. */
+int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	return clear_extent_bits(&BTRFS_I(btree_inode)->extent_tree,
+			buf->start, buf->start + buf->len - 1,
+			EXTENT_DEFRAG_DONE, GFP_NOFS);
+}
+
+/* Clear EXTENT_DEFRAG over @buf's byte range. */
+int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	return clear_extent_bits(&BTRFS_I(btree_inode)->extent_tree,
+			buf->start, buf->start + buf->len - 1,
+			EXTENT_DEFRAG, GFP_NOFS);
+}
+
+/* Read @buf's pages from disk, waiting for completion. */
+int btrfs_read_buffer(struct extent_buffer *buf)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	return read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
+					buf, 0, 1);
+}
+
+/* hook csum generation into writeback of btree pages */
+static struct extent_map_ops btree_extent_map_ops = {
+	.writepage_io_hook = btree_writepage_io_hook,
+};