}
#endif
+/* Forward declaration; ops table is defined at the bottom of this file. */
+static struct extent_map_ops btree_extent_map_ops;
+
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize)
{
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
struct extent_map_tree *tree;
- u64 start = page->index << PAGE_CACHE_SHIFT;
+ u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 found_start;
int found_level;
unsigned long len;
return 0;
}
-static int btree_writepage(struct page *page, struct writeback_control *wbc)
+/*
+ * Checksum a dirty btree page just before it is written out.  Registered
+ * as the writepage_io_hook in btree_extent_map_ops so the extent_map
+ * layer invokes it for every metadata page it writes; start/end delimit
+ * the byte range being written (unused here — the whole page is csummed).
+ */
+static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
-	struct extent_map_tree *tree;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-	tree = &BTRFS_I(page->mapping->host)->extent_tree;
	csum_dirty_buffer(root, page);
+	return 0;
+}
+
+/* Write one dirty btree page; csumming now happens via the io hook above. */
+static int btree_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct extent_map_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->extent_tree;
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
}
+
+/*
+ * Write back dirty btree pages for this mapping.
+ *
+ * For background (WB_SYNC_NONE) writeback, throttle the work: kupdate-style
+ * periodic writeback is skipped entirely, and other async callers only
+ * proceed once enough metadata is dirty — 96MB when running as pdflush,
+ * 8MB otherwise.  Synchronous writeback always goes straight through to
+ * extent_writepages().
+ */
+static int btree_writepages(struct address_space *mapping,
+			    struct writeback_control *wbc)
+{
+	struct extent_map_tree *tree;
+	tree = &BTRFS_I(mapping->host)->extent_tree;
+	if (wbc->sync_mode == WB_SYNC_NONE) {
+		u64 num_dirty;
+		u64 start = 0;
+		unsigned long thresh;
+
+		if (wbc->for_kupdate)
+			return 0;
+
+		/* pdflush gets a much higher threshold than other callers */
+		if (current_is_pdflush()) {
+			thresh = 96 * 1024 * 1024;
+		} else {
+			thresh = 8 * 1024 * 1024;
+		}
+		num_dirty = count_range_bits(tree, &start, thresh, EXTENT_DIRTY);
+		if (num_dirty < thresh) {
+			return 0;
+		}
+	}
+	return extent_writepages(tree, mapping, btree_get_extent, wbc);
+}
+
int btree_readpage(struct file *file, struct page *page)
{
struct extent_map_tree *tree;
static struct address_space_operations btree_aops = {
.readpage = btree_readpage,
.writepage = btree_writepage,
+ .writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
.sync_page = block_sync_page,
}
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
- struct btrfs_root *root,
+ u32 stripesize, struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 objectid)
{
root->sectorsize = sectorsize;
root->nodesize = nodesize;
root->leafsize = leafsize;
+ root->stripesize = stripesize;
root->ref_cows = 0;
root->fs_info = fs_info;
root->objectid = objectid;
u32 blocksize;
__setup_root(tree_root->nodesize, tree_root->leafsize,
- tree_root->sectorsize, root, fs_info, objectid);
+ tree_root->sectorsize, tree_root->stripesize,
+ root, fs_info, objectid);
ret = btrfs_find_last_root(tree_root, objectid,
&root->root_item, &root->root_key);
BUG_ON(ret);
}
__setup_root(tree_root->nodesize, tree_root->leafsize,
- tree_root->sectorsize, root, fs_info,
- location->objectid);
+ tree_root->sectorsize, tree_root->stripesize,
+ root, fs_info, location->objectid);
path = btrfs_alloc_path();
BUG_ON(!path);
read_extent_buffer(l, &root->root_item,
btrfs_item_ptr_offset(l, path->slots[0]),
sizeof(root->root_item));
+ memcpy(&root->root_key, location, sizeof(*location));
ret = 0;
out:
btrfs_release_path(root, path);
u32 nodesize;
u32 leafsize;
u32 blocksize;
+ u32 stripesize;
struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
fs_info->tree_root = tree_root;
fs_info->extent_root = extent_root;
fs_info->sb = sb;
+ fs_info->mount_opt = 0;
+ fs_info->max_extent = (u64)-1;
fs_info->btree_inode = new_inode(sb);
fs_info->btree_inode->i_ino = 1;
fs_info->btree_inode->i_nlink = 1;
extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
fs_info->btree_inode->i_mapping,
GFP_NOFS);
+ BTRFS_I(fs_info->btree_inode)->extent_tree.ops = &btree_extent_map_ops;
+
extent_map_tree_init(&fs_info->free_space_cache,
fs_info->btree_inode->i_mapping, GFP_NOFS);
extent_map_tree_init(&fs_info->block_group_cache,
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->do_barriers = 1;
fs_info->closing = 0;
-
+ fs_info->total_pinned = 0;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+ INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
+#else
INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
+#endif
BTRFS_I(fs_info->btree_inode)->root = tree_root;
memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
sizeof(struct btrfs_key));
goto fail_iput;
}
#endif
- __setup_root(512, 512, 512, tree_root,
+ __setup_root(512, 512, 512, 512, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
fs_info->sb_buffer = read_tree_block(tree_root,
nodesize = btrfs_super_nodesize(disk_super);
leafsize = btrfs_super_leafsize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
+ stripesize = btrfs_super_stripesize(disk_super);
tree_root->nodesize = nodesize;
tree_root->leafsize = leafsize;
tree_root->sectorsize = sectorsize;
+ tree_root->stripesize = stripesize;
sb_set_blocksize(sb, sectorsize);
i_size_write(fs_info->btree_inode,
btrfs_free_block_groups(root->fs_info);
del_fs_roots(fs_info);
+
+ filemap_write_and_wait(fs_info->btree_inode->i_mapping);
+
+ extent_map_tree_empty_lru(&fs_info->free_space_cache);
+ extent_map_tree_empty_lru(&fs_info->block_group_cache);
+ extent_map_tree_empty_lru(&fs_info->pinned_extents);
+ extent_map_tree_empty_lru(&fs_info->pending_del);
+ extent_map_tree_empty_lru(&fs_info->extent_ins);
extent_map_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+
truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
+
iput(fs_info->btree_inode);
#if 0
while(!list_empty(&fs_info->hashers)) {
return read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
buf, 0, 1);
}
+
+/*
+ * Extent-map callbacks for the btree inode: hooks the extent_map write
+ * path into btree_writepage_io_hook so blocks are csummed before I/O.
+ */
+static struct extent_map_ops btree_extent_map_ops = {
+	.writepage_io_hook = btree_writepage_io_hook,
+};