1 #include <linux/module.h>
3 #include <linux/blkdev.h>
4 #include <linux/crypto.h>
5 #include <linux/scatterlist.h>
6 #include <linux/swap.h>
9 #include "transaction.h"
/*
 * Sanity-check a freshly read tree block: the block number stored in the
 * node header must match the location the buffer was read from, and once a
 * root node is loaded, the header's parentid must match the root's.
 * NOTE(review): the return statements and closing braces are elided from
 * this view; presumably non-zero is returned on mismatch — confirm against
 * the full file.
 */
11 static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
13 struct btrfs_node *node = btrfs_buffer_node(buf);
/* header must record the same block number the buffer actually sits at */
14 if (buf->b_blocknr != btrfs_header_blocknr(&node->header)) {
/* parentid check only applies once root->node exists (skipped during mount) */
17 if (root->node && btrfs_header_parentid(&node->header) !=
18 	btrfs_header_parentid(btrfs_buffer_header(root->node))) {
/*
 * Look up an already-cached buffer_head for tree block @blocknr without
 * reading from disk.  Two variants are visible: a simple sb_find_get_block()
 * call, and a hand-rolled page-cache walk over the btree inode's mapping.
 * NOTE(review): the #if/#else selecting between the two variants is elided
 * from this view — confirm which path is compiled in.
 */
24 struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
26 return sb_find_get_block(root->fs_info->sb, blocknr);
28 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
29 int blockbits = root->fs_info->sb->s_blocksize_bits;
/* page index that contains @blocknr (several blocks may share one page) */
30 unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
32 struct buffer_head *bh;
33 struct buffer_head *head;
34 struct buffer_head *ret = NULL;
/* only find an existing page — never allocate one here */
37 page = find_lock_page(mapping, index);
41 if (!page_has_buffers(page))
44 head = page_buffers(page);
/* scan the page's buffer ring for the one mapped at @blocknr */
47 if (buffer_mapped(bh) && bh->b_blocknr == blocknr) {
/* drop the reference taken by find_lock_page() */
59 page_cache_release(page);
/*
 * Find-or-create a buffer_head for tree block @blocknr.  Unlike
 * btrfs_find_tree_block() this allocates the page and buffers if they are
 * not yet present, and maps each new buffer to the backing device.
 * NOTE(review): the #if/#else selecting sb_getblk() versus the page-cache
 * walk is elided from this view — confirm against the full file.
 */
64 struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
67 return sb_getblk(root->fs_info->sb, blocknr);
69 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
70 int blockbits = root->fs_info->sb->s_blocksize_bits;
71 unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
73 struct buffer_head *bh;
74 struct buffer_head *head;
75 struct buffer_head *ret = NULL;
/* block number of the first buffer on this page; walked forward per-buffer */
76 u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);
/* grab_cache_page() allocates the page if needed and returns it locked */
78 page = grab_cache_page(mapping, index);
82 if (!page_has_buffers(page))
83 create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
84 head = page_buffers(page);
87 if (!buffer_mapped(bh)) {
/* map a brand-new buffer straight to the device at its computed block */
88 bh->b_bdev = root->fs_info->sb->s_bdev;
89 bh->b_blocknr = first_block;
90 set_buffer_mapped(bh);
92 if (bh->b_blocknr == blocknr) {
104 page_cache_release(page);
/*
 * Return the number of blocks on @bdev, derived from the bdev inode's size
 * and the device's current block size.  Starts at all-ones so an unknown /
 * zero size leaves "no limit".
 */
109 static sector_t max_block(struct block_device *bdev)
111 sector_t retval = ~((sector_t)0);
112 loff_t sz = i_size_read(bdev->bd_inode);
115 unsigned int size = block_size(bdev);
116 unsigned int sizebits = blksize_bits(size);
/* byte size -> block count */
117 retval = (sz >> sizebits);
/*
 * get_block callback for the btree inode: tree blocks map 1:1 onto device
 * blocks, so @iblock is simply stamped into @bh.  Blocks past the end of
 * the device are rejected (reads get a hole, per the in-body comment).
 */
122 static int btree_get_block(struct inode *inode, sector_t iblock,
123 			   struct buffer_head *bh, int create)
125 if (iblock >= max_block(inode->i_sb->s_bdev)) {
130 * for reads, we're just trying to fill a partial page.
131 * return a hole, they will have to call get_block again
132 * before they can fill it, and they will get -EIO at that
/* identity mapping: logical btree block == physical device block */
137 bh->b_bdev = inode->i_sb->s_bdev;
138 bh->b_blocknr = iblock;
139 set_buffer_mapped(bh);
/*
 * Checksum @len bytes at @data into @result using the fs-wide sha256
 * transform.  The hash_tfm is shared, so the digest runs under
 * fs_info->hash_lock.  Returns the crypto_hash_digest() result
 * (non-zero on failure, which is also logged).
 */
143 int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
146 struct scatterlist sg;
147 struct crypto_hash *tfm = root->fs_info->hash_tfm;
148 struct hash_desc desc;
153 sg_init_one(&sg, data, len);
/* serialize: one shared hash transform for the whole filesystem */
154 spin_lock(&root->fs_info->hash_lock);
155 ret = crypto_hash_digest(&desc, &sg, 1, result);
156 spin_unlock(&root->fs_info->hash_lock);
158 printk("sha256 digest failed\n");
/*
 * Compute the checksum of a tree block (everything after the csum field
 * itself).  One branch compares the result against the stored csum and
 * logs a verify failure; the other writes the fresh csum into the header.
 * NOTE(review): the third parameter (elided here) presumably selects
 * verify-vs-store — callers pass 1 on read and 0 on write; confirm.
 */
162 static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
165 char result[BTRFS_CSUM_SIZE];
167 struct btrfs_node *node;
/* skip the csum field at the front of the block when hashing */
170 ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
171 		      bh->b_size - BTRFS_CSUM_SIZE, result);
175 if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
176 printk("checksum verify failed on %lu\n",
/* store path: stamp the computed csum into the on-disk header */
181 node = btrfs_buffer_node(bh);
182 memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
/*
 * writepage for the btree inode: before handing the page to
 * block_write_full_page(), walk the page's buffers and (re)compute the
 * checksum of every dirty tree block so what hits disk is self-consistent.
 */
187 static int btree_writepage(struct page *page, struct writeback_control *wbc)
190 struct buffer_head *bh;
191 struct btrfs_root *root = btrfs_sb(page->mapping->host->i_sb);
192 struct buffer_head *head;
193 if (!page_has_buffers(page)) {
194 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
195 			(1 << BH_Dirty)|(1 << BH_Uptodate));
197 head = page_buffers(page);
/* only dirty buffers are about to be written; refresh their csums */
200 if (buffer_dirty(bh))
201 csum_tree_block(root, bh, 0);
202 bh = bh->b_this_page;
203 } while (bh != head);
/* generic buffer-based writeback does the actual I/O */
205 return block_write_full_page(page, btree_get_block, wbc);
/* readpage for the btree inode: plain buffer-based read via btree_get_block. */
208 static int btree_readpage(struct file * file, struct page * page)
210 return block_read_full_page(page, btree_get_block);
/* address_space operations for the metadata (btree) inode */
213 static struct address_space_operations btree_aops = {
214 .readpage	= btree_readpage,
215 .writepage	= btree_writepage,
216 .sync_page	= block_sync_page,
/*
 * Read tree block @blocknr, returning a referenced, uptodate buffer_head.
 * One variant is a straight sb_bread(); the other finds/creates the buffer,
 * issues a synchronous read if it is not uptodate, then verifies the
 * checksum and header via csum_tree_block()/check_tree_block().
 * NOTE(review): the #if selecting the variant and the error-return paths
 * are elided from this view.
 */
219 struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
221 return sb_bread(root->fs_info->sb, blocknr);
223 struct buffer_head *bh = NULL;
225 bh = btrfs_find_create_tree_block(root, blocknr);
/* already cached and valid? skip the disk read entirely */
229 if (!buffer_uptodate(bh)) {
231 bh->b_end_io = end_buffer_read_sync;
234 if (!buffer_uptodate(bh))
/* verify (last arg 1) the on-disk checksum after a fresh read */
236 csum_tree_block(root, bh, 1);
240 if (check_tree_block(root, bh))
/*
 * Mark a tree block dirty for writeback.  The WARN_ON catches callers
 * dirtying a buffer they hold no reference on.
 */
250 int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
251 		     struct buffer_head *buf)
253 WARN_ON(atomic_read(&buf->b_count) == 0);
254 mark_buffer_dirty(buf);
/*
 * Clear the dirty bit on a tree block (e.g. when the block is being freed
 * before it was ever written).  Mirrors dirty_tree_block().
 */
258 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
259 		     struct buffer_head *buf)
261 WARN_ON(atomic_read(&buf->b_count) == 0);
262 clear_buffer_dirty(buf);
/*
 * Initialize the in-memory fields of a btrfs_root: no commit root yet,
 * record the block size and fs_info backpointer, and zero the key and
 * root item so they are in a known state before they are read from disk.
 */
266 static int __setup_root(int blocksize,
267 			struct btrfs_root *root,
268 			struct btrfs_fs_info *fs_info,
272 root->commit_root = NULL;
273 root->blocksize = blocksize;
275 root->fs_info = fs_info;
276 memset(&root->root_key, 0, sizeof(root->root_key));
277 memset(&root->root_item, 0, sizeof(root->root_item));
/*
 * Initialize @root for @objectid, look up its most recent root item in the
 * root tree, and read its root node from the block recorded there.
 * NOTE(review): the error handling after btrfs_find_last_root() is elided
 * from this view.
 */
281 static int find_and_setup_root(int blocksize,
282 			       struct btrfs_root *tree_root,
283 			       struct btrfs_fs_info *fs_info,
285 			       struct btrfs_root *root)
289 __setup_root(blocksize, root, fs_info, objectid);
/* locate the newest on-disk root item for this tree */
290 ret = btrfs_find_last_root(tree_root, objectid,
291 			   &root->root_item, &root->root_key);
294 root->node = read_tree_block(root,
295 			     btrfs_root_blocknr(&root->root_item));
/*
 * Mount-time setup: allocate the four in-memory roots (fs, extent, tree,
 * inode-map) plus fs_info, create the private btree inode used to cache
 * metadata pages, allocate the sha256 transform, read the superblock
 * buffer, then load the tree root and the remaining roots from disk.
 * Returns the fs root.
 * NOTE(review): allocation-failure and read-failure paths are elided from
 * this view; several kmalloc results are used without visible NULL checks.
 */
300 struct btrfs_root *open_ctree(struct super_block *sb)
302 struct btrfs_root *root = kmalloc(sizeof(struct btrfs_root),
304 struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
306 struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
308 struct btrfs_root *inode_root = kmalloc(sizeof(struct btrfs_root),
310 struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
313 struct btrfs_super_block *disk_super;
/* radix trees tracking pinned and pending-delete extents */
315 init_bit_radix(&fs_info->pinned_radix);
316 init_bit_radix(&fs_info->pending_del_radix);
/* metadata block size is fixed at 4K here */
317 sb_set_blocksize(sb, 4096);
318 fs_info->running_transaction = NULL;
319 fs_info->fs_root = root;
320 fs_info->tree_root = tree_root;
321 fs_info->extent_root = extent_root;
322 fs_info->inode_root = inode_root;
323 fs_info->last_inode_alloc = 0;
324 fs_info->last_inode_alloc_dirid = 0;
326 fs_info->btree_inode = NULL;
/* private in-kernel inode whose page cache holds all tree blocks */
328 fs_info->btree_inode = new_inode(sb);
329 fs_info->btree_inode->i_ino = 1;
330 fs_info->btree_inode->i_nlink = 1;
/* size mirrors the whole device so any tree block is addressable */
331 fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
332 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
333 insert_inode_hash(fs_info->btree_inode);
/* GFP_NOFS: page allocations for metadata must not recurse into the fs */
334 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
336 fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
337 spin_lock_init(&fs_info->hash_lock);
338 if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
339 printk("failed to allocate sha256 hash\n");
342 mutex_init(&fs_info->trans_mutex);
343 mutex_init(&fs_info->fs_mutex);
344 memset(&fs_info->current_insert, 0, sizeof(fs_info->current_insert));
345 memset(&fs_info->last_insert, 0, sizeof(fs_info->last_insert));
347 __setup_root(sb->s_blocksize, tree_root,
348 	     fs_info, BTRFS_ROOT_TREE_OBJECTID);
/* the superblock lives at a fixed byte offset, converted to a block nr */
349 fs_info->sb_buffer = read_tree_block(tree_root,
350 				     BTRFS_SUPER_INFO_OFFSET /
353 if (!fs_info->sb_buffer) {
357 disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
358 if (!btrfs_super_root(disk_super)) {
361 fs_info->disk_super = disk_super;
/* root-of-roots: the tree root node pointed at by the superblock */
362 tree_root->node = read_tree_block(tree_root,
363 				  btrfs_super_root(disk_super));
364 BUG_ON(!tree_root->node);
366 mutex_lock(&fs_info->fs_mutex);
367 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
368 			  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
371 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
372 			  BTRFS_INODE_MAP_OBJECTID, inode_root);
375 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
376 			  BTRFS_FS_TREE_OBJECTID, root);
377 mutex_unlock(&fs_info->fs_mutex);
/* the freshly loaded fs root doubles as the initial commit root */
379 root->commit_root = root->node;
382 root->fs_info->generation = root->root_key.offset + 1;
/*
 * Write the superblock: point it at the current tree-root block, refresh
 * its checksum, and submit the buffer synchronously, checking that the
 * write completed (buffer uptodate) afterwards.
 */
386 int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
389 struct buffer_head *bh = root->fs_info->sb_buffer;
/* record where the (possibly relocated) tree root now lives */
391 btrfs_set_super_root(root->fs_info->disk_super,
392 		     root->fs_info->tree_root->node->b_blocknr);
394 WARN_ON(atomic_read(&bh->b_count) < 1);
/* take it off the normal writeback path; we submit it ourselves */
395 clear_buffer_dirty(bh);
396 csum_tree_block(root, bh, 0);
397 bh->b_end_io = end_buffer_write_sync;
399 submit_bh(WRITE, bh);
401 if (!buffer_uptodate(bh)) {
/*
 * Unmount-time teardown: commit twice (the second commit drops the
 * original snapshot, per the in-body comment), flush the transaction,
 * write the superblock, release every cached root node and the superblock
 * buffer, free the hash transform, and free the root structures.
 * NOTE(review): btree-inode teardown is commented out in this version,
 * and several return-value checks appear elided from this view.
 */
408 int close_ctree(struct btrfs_root *root)
411 struct btrfs_trans_handle *trans;
413 mutex_lock(&root->fs_info->fs_mutex);
414 trans = btrfs_start_transaction(root, 1);
415 btrfs_commit_transaction(trans, root);
416 /* run commit again to drop the original snapshot */
417 trans = btrfs_start_transaction(root, 1);
418 btrfs_commit_transaction(trans, root);
419 ret = btrfs_write_and_wait_transaction(NULL, root);
421 write_ctree_super(NULL, root);
422 mutex_unlock(&root->fs_info->fs_mutex);
/* release the cached node of each root that was loaded */
425 btrfs_block_release(root, root->node);
426 if (root->fs_info->extent_root->node)
427 btrfs_block_release(root->fs_info->extent_root,
428 		    root->fs_info->extent_root->node);
429 if (root->fs_info->inode_root->node)
430 btrfs_block_release(root->fs_info->inode_root,
431 		    root->fs_info->inode_root->node);
432 if (root->fs_info->tree_root->node)
433 btrfs_block_release(root->fs_info->tree_root,
434 		    root->fs_info->tree_root->node);
435 btrfs_block_release(root, root->commit_root);
436 btrfs_block_release(root, root->fs_info->sb_buffer);
437 crypto_free_hash(root->fs_info->hash_tfm);
438 // truncate_inode_pages(root->fs_info->btree_inode->i_mapping, 0);
439 // iput(root->fs_info->btree_inode);
440 kfree(root->fs_info->extent_root);
441 kfree(root->fs_info->inode_root);
442 kfree(root->fs_info->tree_root);
443 kfree(root->fs_info);
448 void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)