1 #include <linux/module.h>
3 #include <linux/blkdev.h>
4 #include <linux/crypto.h>
5 #include <linux/scatterlist.h>
6 #include <linux/swap.h>
9 #include "transaction.h"
/*
 * Sanity-check a tree block that was just read from disk: the block
 * number recorded in the on-disk header must match the buffer's actual
 * location, and (once root->node is set up) the header's parentid must
 * match the parentid of the root's own node.
 * NOTE(review): intermediate source lines are missing from this view --
 * the bodies of both error branches and the return value are not visible.
 */
11 static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
13 struct btrfs_node *node = btrfs_buffer_node(buf);
/* header claims a different block number than where we read it -- corruption */
14 if (buf->b_blocknr != btrfs_header_blocknr(&node->header)) {
/* parentid mismatch against the (already validated) root node */
17 if (root->node && btrfs_header_parentid(&node->header) !=
18 btrfs_header_parentid(btrfs_buffer_header(root->node))) {
/*
 * Look up an already-cached tree block in the btree inode's page cache.
 * Converts the block number to a page-cache index, locks the page if
 * present, and scans its buffer ring for a mapped buffer_head at the
 * requested block number.  Returns the matching buffer_head or NULL.
 * NOTE(review): lines are missing from this view -- the buffer-ring
 * do/while walk, the reference grab on a hit, the page unlock, and the
 * final return are not visible; presumably ret is grabbed before the
 * page reference is dropped -- confirm against the full source.
 */
24 struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
26 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
27 int blockbits = root->fs_info->sb->s_blocksize_bits;
/* several fs blocks share one page; index = blocknr scaled to page units */
28 unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
30 struct buffer_head *bh;
31 struct buffer_head *head;
32 struct buffer_head *ret = NULL;
/* only find an existing page; do not create one (see _find_create below) */
35 page = find_lock_page(mapping, index);
/* page exists but has no buffers attached -- nothing cached here */
39 if (!page_has_buffers(page))
42 head = page_buffers(page);
/* match on the exact block within the page's buffer ring */
45 if (buffer_mapped(bh) && bh->b_blocknr == blocknr) {
/* drop the reference taken by find_lock_page */
57 page_cache_release(page);
/*
 * Find-or-create variant of btrfs_find_tree_block(): grabs (allocating
 * if needed) the page covering blocknr in the btree inode's mapping,
 * attaches empty buffers when the page has none, maps any unmapped
 * buffer to the device, and returns the buffer_head for blocknr.
 * NOTE(review): lines are missing from this view -- the buffer-ring
 * walk, the per-buffer advance of first_block, the reference grab,
 * page unlock and return are not visible here.
 */
61 struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
64 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
65 int blockbits = root->fs_info->sb->s_blocksize_bits;
66 unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
68 struct buffer_head *bh;
69 struct buffer_head *head;
70 struct buffer_head *ret = NULL;
/* block number of the first buffer on this page */
71 u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);
/* grab_cache_page allocates the page if it is not already cached */
73 page = grab_cache_page(mapping, index);
77 if (!page_has_buffers(page))
78 create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
79 head = page_buffers(page);
/* the btree inode uses an identity mapping: logical == physical block */
82 if (!buffer_mapped(bh)) {
83 bh->b_bdev = root->fs_info->sb->s_bdev;
84 bh->b_blocknr = first_block;
85 set_buffer_mapped(bh);
87 if (bh->b_blocknr == blocknr) {
99 page_cache_release(page);
/*
 * Return the number of fs-blocksize blocks on the block device,
 * derived from the bdev inode's size.  Defaults to ~0 (no limit);
 * NOTE(review): the guard that keeps ~0 when the size is zero and the
 * return statement are in lines missing from this view.
 */
103 static sector_t max_block(struct block_device *bdev)
105 sector_t retval = ~((sector_t)0);
106 loff_t sz = i_size_read(bdev->bd_inode);
109 unsigned int size = block_size(bdev);
110 unsigned int sizebits = blksize_bits(size);
111 retval = (sz >> sizebits);
/*
 * get_block callback for the btree inode: the metadata address space is
 * identity-mapped onto the device (logical block == physical block),
 * bounded by the device size.  Out-of-range reads are returned as holes
 * so the caller gets -EIO only when it actually tries to fill them.
 */
116 static int btree_get_block(struct inode *inode, sector_t iblock,
117 struct buffer_head *bh, int create)
119 if (iblock >= max_block(inode->i_sb->s_bdev)) {
124 * for reads, we're just trying to fill a partial page.
125 * return a hole, they will have to call get_block again
126 * before they can fill it, and they will get -EIO at that
/* identity mapping: just point the bh at the same block on the bdev */
131 bh->b_bdev = inode->i_sb->s_bdev;
132 bh->b_blocknr = iblock;
133 set_buffer_mapped(bh);
/*
 * Compute the checksum (sha256, via the fs-wide hash_tfm) of len bytes
 * at data, writing the digest into result.  The digest is serialized
 * under fs_info->hash_lock because the crypto_hash transform is shared.
 * NOTE(review): the hash_desc setup and the return of ret are in lines
 * missing from this view.
 */
137 int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
140 struct scatterlist sg;
141 struct crypto_hash *tfm = root->fs_info->hash_tfm;
142 struct hash_desc desc;
147 sg_init_one(&sg, data, len);
/* one tfm shared by the whole fs -- serialize digest computation */
148 spin_lock(&root->fs_info->hash_lock);
149 ret = crypto_hash_digest(&desc, &sg, 1, result);
150 spin_unlock(&root->fs_info->hash_lock);
152 printk("sha256 digest failed\n");
/*
 * Checksum a tree block's contents (everything after the csum field at
 * the start of the block).  In verify mode the computed digest is
 * compared against the stored one and a mismatch is logged; otherwise
 * the digest is written into the block header.
 * NOTE(review): the verify-mode parameter name, the early-error return
 * and the final return are in lines missing from this view.
 */
156 static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
159 char result[BTRFS_CSUM_SIZE];
161 struct btrfs_node *node;
/* csum covers the block minus the leading csum field itself */
164 ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
165 bh->b_size - BTRFS_CSUM_SIZE, result);
169 if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
170 printk("checksum verify failed on %lu\n",
/* write mode: stamp the fresh digest into the on-disk header */
175 node = btrfs_buffer_node(bh);
176 memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
/*
 * writepage for the btree inode: before handing the page to the generic
 * block writeout path, walk its buffer ring and (re)compute the
 * checksum of every dirty tree block so what hits the disk is
 * self-consistent.
 */
181 static int btree_writepage(struct page *page, struct writeback_control *wbc)
183 struct buffer_head *bh;
184 struct btrfs_root *root = btrfs_sb(page->mapping->host->i_sb);
185 struct buffer_head *head;
186 if (!page_has_buffers(page)) {
187 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
188 (1 << BH_Dirty)|(1 << BH_Uptodate));
190 head = page_buffers(page);
/* csum_tree_block(..., 0): write mode -- stamp, don't verify */
193 if (buffer_dirty(bh))
194 csum_tree_block(root, bh, 0);
195 bh = bh->b_this_page;
196 } while (bh != head);
/* generic buffer-head based writeout using our identity get_block */
197 return block_write_full_page(page, btree_get_block, wbc);
/* readpage for the btree inode: plain buffer-head read via btree_get_block */
200 static int btree_readpage(struct file * file, struct page * page)
202 return block_read_full_page(page, btree_get_block);
/* address-space operations for the in-memory btree inode's page cache */
205 static struct address_space_operations btree_aops = {
206 .readpage = btree_readpage,
207 .writepage = btree_writepage,
208 .sync_page = block_sync_page,
/*
 * Read a tree block: find/create its buffer in the btree page cache,
 * issue a synchronous read if it is not uptodate, then verify its
 * checksum and header (check_tree_block).  Returns the buffer_head.
 * NOTE(review): the submit/wait of the read, the error paths and the
 * final return are in lines missing from this view.
 */
211 struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
213 struct buffer_head *bh = NULL;
215 bh = btrfs_find_create_tree_block(root, blocknr);
219 if (!buffer_uptodate(bh)) {
221 bh->b_end_io = end_buffer_read_sync;
224 if (!buffer_uptodate(bh))
/* csum_tree_block(..., 1): verify mode -- compare against stored csum */
226 csum_tree_block(root, bh, 1);
230 if (check_tree_block(root, bh))
/*
 * Mark a tree block dirty so writeback will checksum and write it.
 * The caller must hold a reference (WARN if b_count is zero).
 */
238 int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
239 struct buffer_head *buf)
241 WARN_ON(atomic_read(&buf->b_count) == 0);
242 mark_buffer_dirty(buf);
/*
 * Undo a dirty: clear the buffer's dirty bit (e.g. when a block is
 * freed before it was ever written).  Caller must hold a reference.
 */
246 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
247 struct buffer_head *buf)
249 WARN_ON(atomic_read(&buf->b_count) == 0);
250 clear_buffer_dirty(buf);
/*
 * Initialize the in-memory fields of a btrfs_root to a clean state
 * (no commit root yet, zeroed key/item) and attach it to fs_info.
 * NOTE(review): lines are missing from this view -- node/ref init and
 * the objectid parameter's use are not visible here.
 */
254 static int __setup_root(int blocksize,
255 struct btrfs_root *root,
256 struct btrfs_fs_info *fs_info,
260 root->commit_root = NULL;
261 root->blocksize = blocksize;
263 root->fs_info = fs_info;
264 memset(&root->root_key, 0, sizeof(root->root_key));
265 memset(&root->root_item, 0, sizeof(root->root_item));
/*
 * Set up a subtree root by objectid: initialize the in-memory root,
 * look up its most recent root item in the root tree, then read the
 * tree's top node from the block number recorded there.
 * NOTE(review): error handling after btrfs_find_last_root and the
 * return are in lines missing from this view.
 */
269 static int find_and_setup_root(int blocksize,
270 struct btrfs_root *tree_root,
271 struct btrfs_fs_info *fs_info,
273 struct btrfs_root *root)
277 __setup_root(blocksize, root, fs_info, objectid);
/* fetch the latest root_item for this objectid from the root tree */
278 ret = btrfs_find_last_root(tree_root, objectid,
279 &root->root_item, &root->root_key);
282 root->node = read_tree_block(root,
283 btrfs_root_blocknr(&root->root_item));
/*
 * Mount-time setup: allocate the fs/extent/tree/inode roots and the
 * fs_info, create the private btree inode whose page cache holds all
 * metadata, allocate the shared sha256 transform, read the super block
 * and the root tree's top node, then set up the extent, inode-map and
 * fs trees from the root tree.  Returns the fs root.
 * NOTE(review): many lines are missing from this view -- kmalloc flags,
 * NULL checks on the allocations, error unwinding, and the final
 * return are not visible.  The visible code does not check the kmalloc
 * or new_inode results before dereferencing them -- confirm against
 * the full source.
 */
288 struct btrfs_root *open_ctree(struct super_block *sb)
290 struct btrfs_root *root = kmalloc(sizeof(struct btrfs_root),
292 struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
294 struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
296 struct btrfs_root *inode_root = kmalloc(sizeof(struct btrfs_root),
298 struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
301 struct btrfs_super_block *disk_super;
303 init_bit_radix(&fs_info->pinned_radix);
304 init_bit_radix(&fs_info->pending_del_radix);
/* metadata blocksize is fixed at 4k here */
305 sb_set_blocksize(sb, 4096);
306 fs_info->running_transaction = NULL;
307 fs_info->fs_root = root;
308 fs_info->tree_root = tree_root;
309 fs_info->extent_root = extent_root;
310 fs_info->inode_root = inode_root;
311 fs_info->last_inode_alloc = 0;
312 fs_info->last_inode_alloc_dirid = 0;
/* private in-memory inode backing the metadata page cache */
314 fs_info->btree_inode = new_inode(sb);
315 fs_info->btree_inode->i_ino = 1;
316 fs_info->btree_inode->i_nlink = 1;
317 fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
318 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
319 insert_inode_hash(fs_info->btree_inode);
/* GFP_NOFS: page-cache allocations must not recurse into the fs */
320 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
321 fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
322 spin_lock_init(&fs_info->hash_lock);
323 if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
324 printk("failed to allocate sha256 hash\n");
327 mutex_init(&fs_info->trans_mutex);
328 mutex_init(&fs_info->fs_mutex);
329 memset(&fs_info->current_insert, 0, sizeof(fs_info->current_insert));
330 memset(&fs_info->last_insert, 0, sizeof(fs_info->last_insert));
/* the super block lives at a fixed offset; read it via the root tree */
332 __setup_root(sb->s_blocksize, tree_root,
333 fs_info, BTRFS_ROOT_TREE_OBJECTID);
334 fs_info->sb_buffer = read_tree_block(tree_root,
335 BTRFS_SUPER_INFO_OFFSET /
338 if (!fs_info->sb_buffer) {
342 disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
343 if (!btrfs_super_root(disk_super)) {
346 fs_info->disk_super = disk_super;
347 tree_root->node = read_tree_block(tree_root,
348 btrfs_super_root(disk_super));
349 BUG_ON(!tree_root->node);
351 mutex_lock(&fs_info->fs_mutex);
352 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
353 BTRFS_EXTENT_TREE_OBJECTID, extent_root);
356 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
357 BTRFS_INODE_MAP_OBJECTID, inode_root);
360 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
361 BTRFS_FS_TREE_OBJECTID, root);
362 mutex_unlock(&fs_info->fs_mutex);
/* the current fs tree node doubles as the last committed snapshot */
364 root->commit_root = root->node;
367 root->fs_info->generation = root->root_key.offset + 1;
/*
 * Write the super block: point it at the current root-tree node,
 * checksum it, and submit a synchronous write of its buffer.
 * NOTE(review): the lock_buffer/get_bh around the submit, the wait,
 * and the error/return paths are in lines missing from this view.
 */
371 int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
374 struct buffer_head *bh = root->fs_info->sb_buffer;
/* record where the root tree now lives */
376 btrfs_set_super_root(root->fs_info->disk_super,
377 root->fs_info->tree_root->node->b_blocknr);
379 WARN_ON(atomic_read(&bh->b_count) < 1);
380 clear_buffer_dirty(bh);
/* stamp the csum before the write (mode 0 = write, not verify) */
381 csum_tree_block(root, bh, 0);
382 bh->b_end_io = end_buffer_write_sync;
384 submit_bh(WRITE, bh);
/* presumably a wait_on_buffer precedes this check -- missing from view */
386 if (!buffer_uptodate(bh)) {
/*
 * Unmount: commit twice (the second commit drops the snapshot taken by
 * the first), flush and write the super block, release every cached
 * tree node and the super buffer, free the hash transform, drop the
 * btree inode's pages, and free all the per-fs structures.
 * NOTE(review): lines are missing from this view -- the root->node
 * NULL check before the first release, kfree(root), and the return
 * are not visible.
 */
393 int close_ctree(struct btrfs_root *root)
396 struct btrfs_trans_handle *trans;
398 mutex_lock(&root->fs_info->fs_mutex);
399 trans = btrfs_start_transaction(root, 1);
400 btrfs_commit_transaction(trans, root);
401 /* run commit again to drop the original snapshot */
402 trans = btrfs_start_transaction(root, 1);
403 btrfs_commit_transaction(trans, root);
404 ret = btrfs_write_and_wait_transaction(NULL, root);
406 write_ctree_super(NULL, root);
407 mutex_unlock(&root->fs_info->fs_mutex);
/* drop references on every cached top-of-tree node */
410 btrfs_block_release(root, root->node);
411 if (root->fs_info->extent_root->node)
412 btrfs_block_release(root->fs_info->extent_root,
413 root->fs_info->extent_root->node);
414 if (root->fs_info->inode_root->node)
415 btrfs_block_release(root->fs_info->inode_root,
416 root->fs_info->inode_root->node);
417 if (root->fs_info->tree_root->node)
418 btrfs_block_release(root->fs_info->tree_root,
419 root->fs_info->tree_root->node);
420 btrfs_block_release(root, root->commit_root);
421 btrfs_block_release(root, root->fs_info->sb_buffer);
422 crypto_free_hash(root->fs_info->hash_tfm);
/* tear down the metadata page cache before dropping the inode */
423 truncate_inode_pages(root->fs_info->btree_inode->i_mapping, 0);
424 iput(root->fs_info->btree_inode);
425 kfree(root->fs_info->extent_root);
426 kfree(root->fs_info->inode_root);
427 kfree(root->fs_info->tree_root);
428 kfree(root->fs_info);
433 void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)