Btrfs: fix oops after block group lookup
[linux-2.6-block.git] / fs / btrfs / disk-io.c
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"

u64 bh_blocknr(struct buffer_head *bh)
{
        return bh->b_blocknr;
}

static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
{
        struct btrfs_node *node = btrfs_buffer_node(buf);
        if (bh_blocknr(buf) != btrfs_header_blocknr(&node->header)) {
                printk(KERN_CRIT "bh_blocknr(buf) is %Lu, header is %Lu\n",
                       bh_blocknr(buf), btrfs_header_blocknr(&node->header));
                BUG();
        }
        return 0;
}

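/*
 * Look up a tree block in the btree inode's page cache.  Returns the
 * buffer_head with an extra reference held if the block is already
 * present and mapped, or NULL without reading anything from disk.
 */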
struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;

        page = find_lock_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                goto out_unlock;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        page_cache_release(page);
        return ret;
}

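/*
 * Map a buffer_head to a logical block number on the btree device.  A
 * logical block number of zero leaves the buffer without a device,
 * marking it mapped but not pointing at a real disk block.
 */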
int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
                            u64 logical)
{
        if (logical == 0) {
                bh->b_bdev = NULL;
                bh->b_blocknr = 0;
                set_buffer_mapped(bh);
        } else {
                map_bh(bh, root->fs_info->sb, logical);
        }
        return 0;
}

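/*
 * Find or create the page cache buffers for a tree block.  Unmapped
 * buffers on the page are mapped to their logical block numbers, and
 * the matching buffer is returned with an extra reference held.
 */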
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;
        int err;
        u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

        page = grab_cache_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh)) {
                        err = btrfs_map_bh_to_logical(root, bh, first_block);
                        BUG_ON(err);
                }
                if (bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
                first_block++;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret)
                touch_buffer(ret);
        page_cache_release(page);
        return ret;
}

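/*
 * get_block callback for the btree inode: btree blocks are addressed by
 * their logical block number, so iblock maps straight through.
 */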
static int btree_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        int err;
        struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;
        err = btrfs_map_bh_to_logical(root, bh, iblock);
        return err;
}

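/*
 * Checksum a range of data with the crc32c transform attached to the
 * fs_info.  The hash state is shared, so a spinlock serializes callers.
 */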
int btrfs_csum_data(struct btrfs_root *root, char *data, size_t len,
                    char *result)
{
        struct scatterlist sg;
        struct crypto_hash *tfm = root->fs_info->hash_tfm;
        struct hash_desc desc;
        int ret;

        desc.tfm = tfm;
        desc.flags = 0;
        sg_init_one(&sg, data, len);
        spin_lock(&root->fs_info->hash_lock);
        ret = crypto_hash_digest(&desc, &sg, 1, result);
        spin_unlock(&root->fs_info->hash_lock);
        if (ret) {
                printk(KERN_ERR "digest failed\n");
        }
        return ret;
}
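
/*
 * Compute or verify the checksum stored at the front of a tree block.
 * With verify set, compare against the csum already in the buffer and
 * complain on mismatch; otherwise write the fresh checksum into the
 * node header.
 */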
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
                           int verify)
{
        char result[BTRFS_CRC32_SIZE];
        int ret;
        struct btrfs_node *node;

        ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
                              bh->b_size - BTRFS_CSUM_SIZE, result);
        if (ret)
                return ret;
        if (verify) {
                if (memcmp(bh->b_data, result, BTRFS_CRC32_SIZE)) {
                        printk(KERN_ERR "checksum verify failed on %Lu\n",
                               bh_blocknr(bh));
                        return 1;
                }
        } else {
                node = btrfs_buffer_node(bh);
                memcpy(node->header.csum, result, BTRFS_CRC32_SIZE);
        }
        return 0;
}

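/*
 * Write out a btree page.  Every dirty buffer gets its checksum
 * recomputed here so tree blocks always hit the disk with a valid csum.
 */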
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, btree_get_block);
}

static struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .sync_page      = block_sync_page,
};

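/*
 * Start an asynchronous read of a tree block.  The buffer is submitted
 * without waiting; if it is already up to date or locked by someone
 * else, nothing is read.
 */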
int readahead_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;
        int ret = 0;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return 0;
        if (buffer_uptodate(bh)) {
                ret = 1;
                goto done;
        }
        if (test_set_buffer_locked(bh)) {
                ret = 1;
                goto done;
        }
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
        } else {
                unlock_buffer(bh);
                ret = 1;
        }
done:
        brelse(bh);
        return ret;
}

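/*
 * Read a tree block and wait for it.  The checksum is verified once per
 * buffer (the checked bit remembers that), and the header block number
 * is sanity checked against the block we asked for.
 */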
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                goto uptodate;
        lock_buffer(bh);
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto fail;
        } else {
                unlock_buffer(bh);
        }
uptodate:
        if (!buffer_checked(bh)) {
                csum_tree_block(root, bh, 1);
                set_buffer_checked(bh);
        }
        if (check_tree_block(root, bh))
                BUG();
        return bh;
fail:
        brelse(bh);
        return NULL;
}

int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        mark_buffer_dirty(buf);
        return 0;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        clear_buffer_dirty(buf);
        return 0;
}

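/*
 * Initialize the in-memory fields of a btrfs_root to a clean state
 * before its root item and tree node are read in.
 */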
static int __setup_root(int blocksize,
                        struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->inode = NULL;
        root->commit_root = NULL;
        root->blocksize = blocksize;
        root->ref_cows = 0;
        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        root->root_key.objectid = objectid;
        return 0;
}

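/*
 * Set up a root by finding the most recent root item for the given
 * objectid in the tree of root trees and reading its root tree block.
 */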
static int find_and_setup_root(int blocksize,
                               struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;

        __setup_root(blocksize, root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
        return 0;
}

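/*
 * Look up a subvolume root by key.  Cached roots come straight out of
 * the fs_roots radix tree; otherwise the root item is read from the
 * tree of root trees, the root node is loaded, and the new root is
 * inserted into the cache.
 */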
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path;
        struct btrfs_leaf *l;
        u64 highest_inode;
        int ret = 0;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;
        root = kmalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(fs_info->sb->s_blocksize,
                                          fs_info->tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(fs_info->sb->s_blocksize, root, fs_info,
                     location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = btrfs_buffer_leaf(path->nodes[0]);
        memcpy(&root->root_item,
               btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
               sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
insert:
        root->ref_cows = 1;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                brelse(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        ret = btrfs_find_highest_inode(root, &highest_inode);
        if (ret == 0) {
                root->highest_inode = highest_inode;
                root->last_inode_alloc = highest_inode;
        }
        return root;
}

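/*
 * Mount-time setup: build the fs_info, read the super block through the
 * btree inode, load the tree of root trees and the extent tree, and
 * scan the block groups.  Returns the tree root on success.
 */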
struct btrfs_root *open_ctree(struct super_block *sb)
{
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        int ret;
        struct btrfs_super_block *disk_super;

        init_bit_radix(&fs_info->pinned_radix);
        init_bit_radix(&fs_info->pending_del_radix);
        init_bit_radix(&fs_info->extent_map_radix);
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_RADIX_TREE(&fs_info->block_group_radix, GFP_KERNEL);
        INIT_RADIX_TREE(&fs_info->block_group_data_radix, GFP_KERNEL);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        sb_set_blocksize(sb, 4096);
        fs_info->running_transaction = NULL;
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->sb = sb;
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->do_barriers = 1;
        fs_info->extent_tree_insert_nr = 0;
        fs_info->extent_tree_prealloc_nr = 0;
        fs_info->closing = 0;

        INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->hash_tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
        spin_lock_init(&fs_info->hash_lock);
        if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
                printk(KERN_ERR "failed to allocate digest hash\n");
                return NULL;
        }
        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->fs_mutex);

        __setup_root(sb->s_blocksize, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);

        fs_info->sb_buffer = read_tree_block(tree_root,
                                             BTRFS_SUPER_INFO_OFFSET /
                                             sb->s_blocksize);

        if (!fs_info->sb_buffer)
                return NULL;
        disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
        if (!btrfs_super_root(disk_super))
                return NULL;

        i_size_write(fs_info->btree_inode,
                     btrfs_super_total_blocks(disk_super) <<
                     fs_info->btree_inode->i_blkbits);

        fs_info->disk_super = disk_super;
        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super));
        BUG_ON(!tree_root->node);

        mutex_lock(&fs_info->fs_mutex);
        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        BUG_ON(ret);

        btrfs_read_block_groups(extent_root);

        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        mutex_unlock(&fs_info->fs_mutex);
        return tree_root;
}

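/*
 * Write the super block buffer back to disk, recomputing its checksum
 * and using a barrier write when the device supports it.
 */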
int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root)
{
        int ret;
        struct buffer_head *bh = root->fs_info->sb_buffer;

        btrfs_set_super_root(root->fs_info->disk_super,
                             bh_blocknr(root->fs_info->tree_root->node));
        lock_buffer(bh);
        WARN_ON(atomic_read(&bh->b_count) < 1);
        clear_buffer_dirty(bh);
        csum_tree_block(root, bh, 0);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (root->fs_info->do_barriers)
                ret = submit_bh(WRITE_BARRIER, bh);
        else
                ret = submit_bh(WRITE, bh);
        if (ret == -EOPNOTSUPP) {
                set_buffer_uptodate(bh);
                root->fs_info->do_barriers = 0;
                ret = submit_bh(WRITE, bh);
        }
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                WARN_ON(1);
                return -EIO;
        }
        return 0;
}

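/*
 * Drop a cached subvolume root: remove it from the radix tree and
 * release the inode and tree buffers it still holds.
 */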
static int free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        if (root->inode)
                iput(root->inode);
        if (root->node)
                brelse(root->node);
        if (root->commit_root)
                brelse(root->commit_root);
        kfree(root);
        return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        free_fs_root(fs_info, gang[i]);
        }
        return 0;
}

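/*
 * Unmount-time teardown: commit the running transaction (twice, so the
 * old snapshot is dropped), write the super block, then release every
 * cached root, the btree inode, and the per-fs allocations.
 */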
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        fs_info->closing = 1;
        btrfs_transaction_flush_work(root);
        mutex_lock(&fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
        write_ctree_super(NULL, root);
        mutex_unlock(&fs_info->fs_mutex);

        if (fs_info->extent_root->node)
                btrfs_block_release(fs_info->extent_root,
                                    fs_info->extent_root->node);
        if (fs_info->tree_root->node)
                btrfs_block_release(fs_info->tree_root,
                                    fs_info->tree_root->node);
        btrfs_block_release(root, fs_info->sb_buffer);
        crypto_free_hash(fs_info->hash_tfm);
        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
        iput(fs_info->btree_inode);

        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        return 0;
}

void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
        brelse(buf);
}

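/*
 * Let the generic writeback code throttle callers that have dirtied a
 * lot of btree pages.
 */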
void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
        balance_dirty_pages_ratelimited(root->fs_info->btree_inode->i_mapping);
}