Btrfs: early support for multiple devices
[linux-2.6-block.git] / fs / btrfs / disk-io.c
... / ...
CommitLineData
1#include <linux/module.h>
2#include <linux/fs.h>
3#include <linux/blkdev.h>
4#include <linux/crypto.h>
5#include <linux/scatterlist.h>
6#include <linux/swap.h>
7#include <linux/radix-tree.h>
8#include "ctree.h"
9#include "disk-io.h"
10#include "transaction.h"
11#include "btrfs_inode.h"
12
13static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
14{
15 struct btrfs_node *node = btrfs_buffer_node(buf);
16 if (buf->b_blocknr != btrfs_header_blocknr(&node->header)) {
17 BUG();
18 }
19 return 0;
20}
21
22struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
23{
24 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
25 int blockbits = root->fs_info->sb->s_blocksize_bits;
26 unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
27 struct page *page;
28 struct buffer_head *bh;
29 struct buffer_head *head;
30 struct buffer_head *ret = NULL;
31
32
33 page = find_lock_page(mapping, index);
34 if (!page)
35 return NULL;
36
37 if (!page_has_buffers(page))
38 goto out_unlock;
39
40 head = page_buffers(page);
41 bh = head;
42 do {
43 if (buffer_mapped(bh) && bh->b_blocknr == blocknr) {
44 ret = bh;
45 get_bh(bh);
46 goto out_unlock;
47 }
48 bh = bh->b_this_page;
49 } while (bh != head);
50out_unlock:
51 unlock_page(page);
52 if (ret) {
53 touch_buffer(ret);
54 }
55 page_cache_release(page);
56 return ret;
57}
58
/*
 * Find or create the buffer_head for @blocknr in the btree inode's
 * page cache, creating and mapping buffers on the page as needed.
 *
 * Mapping is identity: logical tree block number == device block
 * number.  Returns the buffer with an extra reference taken, or NULL
 * if the page could not be grabbed.
 */
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
						 u64 blocknr)
{
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	int blockbits = root->fs_info->sb->s_blocksize_bits;
	unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
	struct page *page;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct buffer_head *ret = NULL;
	/* block number of the first buffer on this page; advances in
	 * lockstep with the buffer walk below */
	u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

	page = grab_cache_page(mapping, index);
	if (!page)
		return NULL;

	if (!page_has_buffers(page))
		create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh)) {
			/* identity map any buffer we pass over */
			bh->b_bdev = root->fs_info->sb->s_bdev;
			bh->b_blocknr = first_block;
			set_buffer_mapped(bh);
		}
		if (bh->b_blocknr == blocknr) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
		first_block++;
	} while (bh != head);
out_unlock:
	unlock_page(page);
	if (ret)
		touch_buffer(ret);
	page_cache_release(page);
	return ret;
}
100
101static sector_t max_block(struct block_device *bdev)
102{
103 sector_t retval = ~((sector_t)0);
104 loff_t sz = i_size_read(bdev->bd_inode);
105
106 if (sz) {
107 unsigned int size = block_size(bdev);
108 unsigned int sizebits = blksize_bits(size);
109 retval = (sz >> sizebits);
110 }
111 return retval;
112}
113
114static int btree_get_block(struct inode *inode, sector_t iblock,
115 struct buffer_head *bh, int create)
116{
117 if (iblock >= max_block(inode->i_sb->s_bdev)) {
118 if (create)
119 return -EIO;
120
121 /*
122 * for reads, we're just trying to fill a partial page.
123 * return a hole, they will have to call get_block again
124 * before they can fill it, and they will get -EIO at that
125 * time
126 */
127 return 0;
128 }
129 bh->b_bdev = inode->i_sb->s_bdev;
130 bh->b_blocknr = iblock;
131 set_buffer_mapped(bh);
132 return 0;
133}
134
/*
 * Digest @len bytes at @data into @result using the filesystem-wide
 * sha256 transform.  The transform object is shared, so hash_lock
 * serializes all digest operations.
 *
 * Returns 0 on success or the crypto layer's error code.
 */
int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
		    char *result)
{
	struct scatterlist sg;
	struct crypto_hash *tfm = root->fs_info->hash_tfm;
	struct hash_desc desc;
	int ret;

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, data, len);
	/* one shared transform for the whole fs; serialize its use */
	spin_lock(&root->fs_info->hash_lock);
	ret = crypto_hash_digest(&desc, &sg, 1, result);
	spin_unlock(&root->fs_info->hash_lock);
	if (ret) {
		printk("sha256 digest failed\n");
	}
	return ret;
}
154static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
155 int verify)
156{
157 char result[BTRFS_CSUM_SIZE];
158 int ret;
159 struct btrfs_node *node;
160
161 ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
162 bh->b_size - BTRFS_CSUM_SIZE, result);
163 if (ret)
164 return ret;
165 if (verify) {
166 if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
167 printk("checksum verify failed on %lu\n",
168 bh->b_blocknr);
169 return 1;
170 }
171 } else {
172 node = btrfs_buffer_node(bh);
173 memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
174 }
175 return 0;
176}
177
178static int btree_writepage(struct page *page, struct writeback_control *wbc)
179{
180 struct buffer_head *bh;
181 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
182 struct buffer_head *head;
183 if (!page_has_buffers(page)) {
184 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
185 (1 << BH_Dirty)|(1 << BH_Uptodate));
186 }
187 head = page_buffers(page);
188 bh = head;
189 do {
190 if (buffer_dirty(bh))
191 csum_tree_block(root, bh, 0);
192 bh = bh->b_this_page;
193 } while (bh != head);
194 return block_write_full_page(page, btree_get_block, wbc);
195}
196
197static int btree_readpage(struct file * file, struct page * page)
198{
199 return block_read_full_page(page, btree_get_block);
200}
201
/* address space operations for the metadata btree inode */
static struct address_space_operations btree_aops = {
	.readpage = btree_readpage,
	.writepage = btree_writepage,
	.sync_page = block_sync_page,
};
207
208struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
209{
210 struct buffer_head *bh = NULL;
211
212 bh = btrfs_find_create_tree_block(root, blocknr);
213 if (!bh)
214 return bh;
215 if (buffer_uptodate(bh))
216 goto uptodate;
217 lock_buffer(bh);
218 if (!buffer_uptodate(bh)) {
219 get_bh(bh);
220 bh->b_end_io = end_buffer_read_sync;
221 submit_bh(READ, bh);
222 wait_on_buffer(bh);
223 if (!buffer_uptodate(bh))
224 goto fail;
225 csum_tree_block(root, bh, 1);
226 } else {
227 unlock_buffer(bh);
228 }
229uptodate:
230 if (check_tree_block(root, bh))
231 BUG();
232 return bh;
233fail:
234 brelse(bh);
235 return NULL;
236}
237
238int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
239 struct buffer_head *buf)
240{
241 WARN_ON(atomic_read(&buf->b_count) == 0);
242 mark_buffer_dirty(buf);
243 return 0;
244}
245
246int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
247 struct buffer_head *buf)
248{
249 WARN_ON(atomic_read(&buf->b_count) == 0);
250 clear_buffer_dirty(buf);
251 return 0;
252}
253
254static int __setup_root(int blocksize,
255 struct btrfs_root *root,
256 struct btrfs_fs_info *fs_info,
257 u64 objectid)
258{
259 root->node = NULL;
260 root->inode = NULL;
261 root->commit_root = NULL;
262 root->blocksize = blocksize;
263 root->ref_cows = 0;
264 root->fs_info = fs_info;
265 root->objectid = objectid;
266 root->last_trans = 0;
267 root->highest_inode = 0;
268 root->last_inode_alloc = 0;
269 memset(&root->root_key, 0, sizeof(root->root_key));
270 memset(&root->root_item, 0, sizeof(root->root_item));
271 return 0;
272}
273
274static int find_and_setup_root(int blocksize,
275 struct btrfs_root *tree_root,
276 struct btrfs_fs_info *fs_info,
277 u64 objectid,
278 struct btrfs_root *root)
279{
280 int ret;
281
282 __setup_root(blocksize, root, fs_info, objectid);
283 ret = btrfs_find_last_root(tree_root, objectid,
284 &root->root_item, &root->root_key);
285 BUG_ON(ret);
286
287 root->node = read_tree_block(root,
288 btrfs_root_blocknr(&root->root_item));
289 BUG_ON(!root->node);
290 return 0;
291}
292
/*
 * Look up (or load from disk) the subvolume root described by
 * @location, caching it in fs_info->fs_roots_radix.
 *
 * Returns the cached or freshly loaded root, or an ERR_PTR on
 * allocation/search failure.
 *
 * NOTE(review): the column-0 printk()s below look like bringup debug
 * output left in place -- candidates for removal or a debug macro.
 */
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct btrfs_leaf *l;
	u64 highest_inode;
	int ret = 0;

printk("read_fs_root looking for %Lu %Lu %u\n", location->objectid, location->offset, location->flags);
	/* fast path: root already cached in the radix tree */
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root) {
printk("found %p in cache\n", root);
		return root;
	}
	root = kmalloc(sizeof(*root), GFP_NOFS);
	if (!root) {
printk("failed1\n");
		return ERR_PTR(-ENOMEM);
	}
	/* offset of -1: load the latest root item via
	 * btrfs_find_last_root rather than an exact key search */
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(fs_info->sb->s_blocksize,
					  fs_info->tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
printk("failed2\n");
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(fs_info->sb->s_blocksize, root, fs_info,
		     location->objectid);

	/* exact key search in the tree of tree roots */
	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
printk("internal search_slot gives us %d\n", ret);
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = btrfs_buffer_leaf(path->nodes[0]);
	memcpy(&root->root_item,
	       btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
	       sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	root->node = read_tree_block(root,
				     btrfs_root_blocknr(&root->root_item));
	BUG_ON(!root->node);
insert:
printk("inserting %p\n", root);
	root->ref_cows = 1;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
printk("radix_tree_insert gives us %d\n", ret);
		brelse(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	/* remember the highest allocated inode for fast allocation */
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
printk("highest inode is %Lu\n", highest_inode);
	}
printk("all worked\n");
	return root;
}
376
377struct btrfs_root *open_ctree(struct super_block *sb)
378{
379 struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
380 GFP_NOFS);
381 struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
382 GFP_NOFS);
383 struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
384 GFP_NOFS);
385 struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
386 GFP_NOFS);
387 int ret;
388 struct btrfs_super_block *disk_super;
389
390 init_bit_radix(&fs_info->pinned_radix);
391 init_bit_radix(&fs_info->pending_del_radix);
392 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
393 sb_set_blocksize(sb, 4096);
394 fs_info->running_transaction = NULL;
395 fs_info->tree_root = tree_root;
396 fs_info->extent_root = extent_root;
397 fs_info->dev_root = dev_root;
398 fs_info->sb = sb;
399 fs_info->btree_inode = new_inode(sb);
400 fs_info->btree_inode->i_ino = 1;
401 fs_info->btree_inode->i_nlink = 1;
402 fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
403 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
404 BTRFS_I(fs_info->btree_inode)->root = tree_root;
405 memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
406 sizeof(struct btrfs_key));
407 insert_inode_hash(fs_info->btree_inode);
408 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
409 fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
410 spin_lock_init(&fs_info->hash_lock);
411 if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
412 printk("failed to allocate sha256 hash\n");
413 return NULL;
414 }
415 mutex_init(&fs_info->trans_mutex);
416 mutex_init(&fs_info->fs_mutex);
417 memset(&fs_info->current_insert, 0, sizeof(fs_info->current_insert));
418 memset(&fs_info->last_insert, 0, sizeof(fs_info->last_insert));
419
420 __setup_root(sb->s_blocksize, dev_root,
421 fs_info, BTRFS_DEV_TREE_OBJECTID);
422
423 __setup_root(sb->s_blocksize, tree_root,
424 fs_info, BTRFS_ROOT_TREE_OBJECTID);
425 fs_info->sb_buffer = read_tree_block(tree_root,
426 BTRFS_SUPER_INFO_OFFSET /
427 sb->s_blocksize);
428
429 if (!fs_info->sb_buffer)
430 return NULL;
431 disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
432 if (!btrfs_super_root(disk_super))
433 return NULL;
434
435 fs_info->disk_super = disk_super;
436 dev_root->node = read_tree_block(tree_root,
437 btrfs_super_device_root(disk_super));
438 tree_root->node = read_tree_block(tree_root,
439 btrfs_super_root(disk_super));
440 BUG_ON(!tree_root->node);
441
442 mutex_lock(&fs_info->fs_mutex);
443 ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
444 BTRFS_EXTENT_TREE_OBJECTID, extent_root);
445 BUG_ON(ret);
446
447 fs_info->generation = btrfs_super_generation(disk_super) + 1;
448 memset(&fs_info->kobj, 0, sizeof(fs_info->kobj));
449 kobj_set_kset_s(fs_info, btrfs_subsys);
450 kobject_set_name(&fs_info->kobj, "%s", sb->s_id);
451 kobject_register(&fs_info->kobj);
452 mutex_unlock(&fs_info->fs_mutex);
453 return tree_root;
454}
455
/*
 * Point the super block at the current tree root and write it to disk
 * synchronously.
 *
 * Returns 0 on success, -EIO if the write did not complete cleanly.
 */
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	struct buffer_head *bh = root->fs_info->sb_buffer;

	btrfs_set_super_root(root->fs_info->disk_super,
			     root->fs_info->tree_root->node->b_blocknr);
	lock_buffer(bh);
	WARN_ON(atomic_read(&bh->b_count) < 1);
	clear_buffer_dirty(bh);
	/* recompute the checksum now that the root pointer changed */
	csum_tree_block(root, bh, 0);
	bh->b_end_io = end_buffer_write_sync;
	/* extra ref held for the duration of the IO */
	get_bh(bh);
	submit_bh(WRITE, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		WARN_ON(1);
		return -EIO;
	}
	return 0;
}
477
478static int free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
479{
480 radix_tree_delete(&fs_info->fs_roots_radix,
481 (unsigned long)root->root_key.objectid);
482 if (root->inode)
483 iput(root->inode);
484 if (root->node)
485 brelse(root->node);
486 if (root->commit_root)
487 brelse(root->commit_root);
488 kfree(root);
489 return 0;
490}
491
492int del_fs_roots(struct btrfs_fs_info *fs_info)
493{
494 int ret;
495 struct btrfs_root *gang[8];
496 int i;
497
498 while(1) {
499 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
500 (void **)gang, 0,
501 ARRAY_SIZE(gang));
502 if (!ret)
503 break;
504 for (i = 0; i < ret; i++)
505 free_fs_root(fs_info, gang[i]);
506 }
507 return 0;
508}
509
510int close_ctree(struct btrfs_root *root)
511{
512 int ret;
513 struct btrfs_trans_handle *trans;
514 struct btrfs_fs_info *fs_info = root->fs_info;
515
516 mutex_lock(&fs_info->fs_mutex);
517 trans = btrfs_start_transaction(root, 1);
518 btrfs_commit_transaction(trans, root);
519 /* run commit again to drop the original snapshot */
520 trans = btrfs_start_transaction(root, 1);
521 btrfs_commit_transaction(trans, root);
522 ret = btrfs_write_and_wait_transaction(NULL, root);
523 BUG_ON(ret);
524 write_ctree_super(NULL, root);
525 mutex_unlock(&fs_info->fs_mutex);
526
527 if (fs_info->extent_root->node)
528 btrfs_block_release(fs_info->extent_root,
529 fs_info->extent_root->node);
530 if (fs_info->dev_root->node)
531 btrfs_block_release(fs_info->dev_root,
532 fs_info->dev_root->node);
533 if (fs_info->tree_root->node)
534 btrfs_block_release(fs_info->tree_root,
535 fs_info->tree_root->node);
536 btrfs_block_release(root, fs_info->sb_buffer);
537 crypto_free_hash(fs_info->hash_tfm);
538 truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
539 iput(fs_info->btree_inode);
540 del_fs_roots(fs_info);
541 kfree(fs_info->extent_root);
542 kfree(fs_info->tree_root);
543 kobject_unregister(&fs_info->kobj);
544 return 0;
545}
546
/*
 * Drop one reference on a tree block buffer.
 */
void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
	brelse(buf);
}
551