linux-2.6-block.git: fs/btrfs/disk-io.c (commit "Btrfs: Dir fsync optimizations")
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/version.h>
20#include <linux/fs.h>
21#include <linux/blkdev.h>
22#include <linux/scatterlist.h>
23#include <linux/swap.h>
24#include <linux/radix-tree.h>
25#include <linux/writeback.h>
26#include <linux/buffer_head.h> /* for block_sync_page */
27#include <linux/workqueue.h>
28#include <linux/kthread.h>
29#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
30# include <linux/freezer.h>
31#else
32# include <linux/sched.h>
33#endif
34#include "crc32c.h"
35#include "ctree.h"
36#include "disk-io.h"
37#include "transaction.h"
38#include "btrfs_inode.h"
39#include "volumes.h"
40#include "print-tree.h"
41#include "async-thread.h"
42#include "locking.h"
43#include "ref-cache.h"
44#include "tree-log.h"
45
46#if 0
47static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
48{
49 if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
50 printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
51 (unsigned long long)extent_buffer_blocknr(buf),
52 (unsigned long long)btrfs_header_blocknr(buf));
53 return 1;
54 }
55 return 0;
56}
57#endif
58
59static struct extent_io_ops btree_extent_io_ops;
60static void end_workqueue_fn(struct btrfs_work *work);
61
62struct end_io_wq {
63 struct bio *bio;
64 bio_end_io_t *end_io;
65 void *private;
66 struct btrfs_fs_info *info;
67 int error;
68 int metadata;
69 struct list_head list;
70 struct btrfs_work work;
71};
72
73struct async_submit_bio {
74 struct inode *inode;
75 struct bio *bio;
76 struct list_head list;
77 extent_submit_bio_hook_t *submit_bio_hook;
78 int rw;
79 int mirror_num;
80 struct btrfs_work work;
81};
82
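/*
 * btree_get_extent: fetch (or create) the extent mapping for the btree
 * inode.  All metadata pages share one mapping, so a single catch-all
 * extent map covering the whole address space is used and is pointed
 * at the latest bdev in the FS.
 */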
83struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
84 size_t page_offset, u64 start, u64 len,
85 int create)
86{
87 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
88 struct extent_map *em;
89 int ret;
90
91 spin_lock(&em_tree->lock);
92 em = lookup_extent_mapping(em_tree, start, len);
93 if (em) {
94 em->bdev =
95 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
96 spin_unlock(&em_tree->lock);
97 goto out;
98 }
99 spin_unlock(&em_tree->lock);
100
101 em = alloc_extent_map(GFP_NOFS);
102 if (!em) {
103 em = ERR_PTR(-ENOMEM);
104 goto out;
105 }
106 em->start = 0;
107 em->len = (u64)-1;
108 em->block_start = 0;
109 em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
110
111 spin_lock(&em_tree->lock);
112 ret = add_extent_mapping(em_tree, em);
113 if (ret == -EEXIST) {
114 u64 failed_start = em->start;
115 u64 failed_len = em->len;
116
117 printk("failed to insert %Lu %Lu -> %Lu into tree\n",
118 em->start, em->len, em->block_start);
119 free_extent_map(em);
120 em = lookup_extent_mapping(em_tree, start, len);
121 if (em) {
122 printk("after failing, found %Lu %Lu %Lu\n",
123 em->start, em->len, em->block_start);
124 ret = 0;
125 } else {
126 em = lookup_extent_mapping(em_tree, failed_start,
127 failed_len);
128 if (em) {
129 printk("double failure lookup gives us "
130 "%Lu %Lu -> %Lu\n", em->start,
131 em->len, em->block_start);
132 free_extent_map(em);
133 }
134 ret = -EIO;
135 }
136 } else if (ret) {
137 free_extent_map(em);
138 em = NULL;
139 }
140 spin_unlock(&em_tree->lock);
141
142 if (ret)
143 em = ERR_PTR(ret);
144out:
145 return em;
146}
147
148u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
149{
150 return btrfs_crc32c(seed, data, len);
151}
152
153void btrfs_csum_final(u32 crc, char *result)
154{
155 *(__le32 *)result = ~cpu_to_le32(crc);
156}
157
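/*
 * compute (verify == 0) or check (verify == 1) the crc32c of a tree
 * block.  The checksum covers everything after the first
 * BTRFS_CSUM_SIZE bytes of the header and is stored at offset 0.
 */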
158static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
159 int verify)
160{
161 char result[BTRFS_CRC32_SIZE];
162 unsigned long len;
163 unsigned long cur_len;
164 unsigned long offset = BTRFS_CSUM_SIZE;
165 char *map_token = NULL;
166 char *kaddr;
167 unsigned long map_start;
168 unsigned long map_len;
169 int err;
170 u32 crc = ~(u32)0;
171
172 len = buf->len - offset;
173 while(len > 0) {
174 err = map_private_extent_buffer(buf, offset, 32,
175 &map_token, &kaddr,
176 &map_start, &map_len, KM_USER0);
177 if (err) {
178 printk("failed to map extent buffer! %lu\n",
179 offset);
180 return 1;
181 }
182 cur_len = min(len, map_len - (offset - map_start));
183 crc = btrfs_csum_data(root, kaddr + offset - map_start,
184 crc, cur_len);
185 len -= cur_len;
186 offset += cur_len;
187 unmap_extent_buffer(buf, map_token, KM_USER0);
188 }
189 btrfs_csum_final(crc, result);
190
191 if (verify) {
192 /* FIXME, this is not good */
193 if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
194 u32 val;
195 u32 found = 0;
196 memcpy(&found, result, BTRFS_CRC32_SIZE);
197
198 read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
199 printk("btrfs: %s checksum verify failed on %llu "
200 "wanted %X found %X level %d\n",
201 root->fs_info->sb->s_id,
202 buf->start, val, found, btrfs_header_level(buf));
203 return 1;
204 }
205 } else {
206 write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
207 }
208 return 0;
209}
210
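/*
 * make sure the generation stamped in a tree block matches the transid
 * the parent recorded when it pointed at this block.  On a mismatch
 * the buffer is marked not uptodate so the caller re-reads it,
 * possibly from another mirror.
 */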
211static int verify_parent_transid(struct extent_io_tree *io_tree,
212 struct extent_buffer *eb, u64 parent_transid)
213{
214 int ret;
215
216 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
217 return 0;
218
219 lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
220 if (extent_buffer_uptodate(io_tree, eb) &&
221 btrfs_header_generation(eb) == parent_transid) {
222 ret = 0;
223 goto out;
224 }
225 printk("parent transid verify failed on %llu wanted %llu found %llu\n",
226 (unsigned long long)eb->start,
227 (unsigned long long)parent_transid,
228 (unsigned long long)btrfs_header_generation(eb));
229 ret = 1;
230 clear_extent_buffer_uptodate(io_tree, eb);
231out:
232 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
233 GFP_NOFS);
234 return ret;
235
236}
237
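/*
 * read the pages of a tree block and verify the checksum and parent
 * transid.  When verification fails, the read is retried against the
 * other mirrors of the block until one passes or we run out of copies.
 */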
238static int btree_read_extent_buffer_pages(struct btrfs_root *root,
239 struct extent_buffer *eb,
240 u64 start, u64 parent_transid)
241{
242 struct extent_io_tree *io_tree;
243 int ret;
244 int num_copies = 0;
245 int mirror_num = 0;
246
247 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
248 while (1) {
249 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
250 btree_get_extent, mirror_num);
251 if (!ret &&
252 !verify_parent_transid(io_tree, eb, parent_transid))
253 return ret;
254 printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
255 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
256 eb->start, eb->len);
257 if (num_copies == 1)
258 return ret;
259
260 mirror_num++;
261 if (mirror_num > num_copies)
262 return ret;
263 }
264 return -EIO;
265}
266
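/*
 * checksum a dirty tree block page before it goes down for IO.  This
 * sanity checks that the page really belongs to the extent buffer it
 * claims to, then stamps the crc into the block header.
 */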
267int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
268{
269 struct extent_io_tree *tree;
270 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
271 u64 found_start;
272 int found_level;
273 unsigned long len;
274 struct extent_buffer *eb;
275 int ret;
276
277 tree = &BTRFS_I(page->mapping->host)->io_tree;
278
279 if (page->private == EXTENT_PAGE_PRIVATE)
280 goto out;
281 if (!page->private)
282 goto out;
283 len = page->private >> 2;
284 if (len == 0) {
285 WARN_ON(1);
286 }
287 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
288 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
289 btrfs_header_generation(eb));
290 BUG_ON(ret);
291 found_start = btrfs_header_bytenr(eb);
292 if (found_start != start) {
293 printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
294 start, found_start, len);
295 WARN_ON(1);
296 goto err;
297 }
298 if (eb->first_page != page) {
299 printk("bad first page %lu %lu\n", eb->first_page->index,
300 page->index);
301 WARN_ON(1);
302 goto err;
303 }
304 if (!PageUptodate(page)) {
305 printk("csum not up to date page %lu\n", page->index);
306 WARN_ON(1);
307 goto err;
308 }
309 found_level = btrfs_header_level(eb);
310
311 csum_tree_block(root, eb, 0);
312err:
313 free_extent_buffer(eb);
314out:
315 return 0;
316}
317
318static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
319{
320 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
321
322 csum_dirty_buffer(root, page);
323 return 0;
324}
325
326int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
327 struct extent_state *state)
328{
329 struct extent_io_tree *tree;
330 u64 found_start;
331 int found_level;
332 unsigned long len;
333 struct extent_buffer *eb;
334 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
335 int ret = 0;
336
337 tree = &BTRFS_I(page->mapping->host)->io_tree;
338 if (page->private == EXTENT_PAGE_PRIVATE)
339 goto out;
340 if (!page->private)
341 goto out;
342 len = page->private >> 2;
343 if (len == 0) {
344 WARN_ON(1);
345 }
346 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
347
348 found_start = btrfs_header_bytenr(eb);
349 if (0 && found_start != start) {
350 printk("bad tree block start %llu %llu\n",
351 (unsigned long long)found_start,
352 (unsigned long long)eb->start);
353 ret = -EIO;
354 goto err;
355 }
356 if (eb->first_page != page) {
357 printk("bad first page %lu %lu\n", eb->first_page->index,
358 page->index);
359 WARN_ON(1);
360 ret = -EIO;
361 goto err;
362 }
363 if (memcmp_extent_buffer(eb, root->fs_info->fsid,
364 (unsigned long)btrfs_header_fsid(eb),
365 BTRFS_FSID_SIZE)) {
366 printk("bad fsid on block %Lu\n", eb->start);
367 ret = -EIO;
368 goto err;
369 }
370 found_level = btrfs_header_level(eb);
371
372 ret = csum_tree_block(root, eb, 1);
373 if (ret)
374 ret = -EIO;
375
376 end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
377 end = eb->start + end - 1;
378err:
379 free_extent_buffer(eb);
380out:
381 return ret;
382}
383
384#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
385static void end_workqueue_bio(struct bio *bio, int err)
386#else
387static int end_workqueue_bio(struct bio *bio,
388 unsigned int bytes_done, int err)
389#endif
390{
391 struct end_io_wq *end_io_wq = bio->bi_private;
392 struct btrfs_fs_info *fs_info;
393
394#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
395 if (bio->bi_size)
396 return 1;
397#endif
398
399 fs_info = end_io_wq->info;
400 end_io_wq->error = err;
401 end_io_wq->work.func = end_workqueue_fn;
402 end_io_wq->work.flags = 0;
403 if (bio->bi_rw & (1 << BIO_RW))
404 btrfs_queue_worker(&fs_info->endio_write_workers,
405 &end_io_wq->work);
406 else
407 btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
408
409#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
410 return 0;
411#endif
412}
413
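/*
 * redirect a bio's completion into the endio worker threads.  The
 * original bi_end_io and bi_private are stashed in an end_io_wq so
 * end_workqueue_fn can restore them once checksum work has run outside
 * of interrupt context.
 */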
414int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
415 int metadata)
416{
417 struct end_io_wq *end_io_wq;
418 end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
419 if (!end_io_wq)
420 return -ENOMEM;
421
422 end_io_wq->private = bio->bi_private;
423 end_io_wq->end_io = bio->bi_end_io;
424 end_io_wq->info = info;
425 end_io_wq->error = 0;
426 end_io_wq->bio = bio;
427 end_io_wq->metadata = metadata;
428
429 bio->bi_private = end_io_wq;
430 bio->bi_end_io = end_workqueue_bio;
431 return 0;
432}
433
434unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
435{
436 unsigned long limit = min_t(unsigned long,
437 info->workers.max_workers,
438 info->fs_devices->open_devices);
439 return 256 * limit;
440}
441
442int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
443{
444 return atomic_read(&info->nr_async_bios) >
445 btrfs_async_submit_limit(info);
446}
447
448static void run_one_async_submit(struct btrfs_work *work)
449{
450 struct btrfs_fs_info *fs_info;
451 struct async_submit_bio *async;
452 int limit;
453
454 async = container_of(work, struct async_submit_bio, work);
455 fs_info = BTRFS_I(async->inode)->root->fs_info;
456
457 limit = btrfs_async_submit_limit(fs_info);
458 limit = limit * 2 / 3;
459
460 atomic_dec(&fs_info->nr_async_submits);
461
462 if (atomic_read(&fs_info->nr_async_submits) < limit &&
463 waitqueue_active(&fs_info->async_submit_wait))
464 wake_up(&fs_info->async_submit_wait);
465
466 async->submit_bio_hook(async->inode, async->rw, async->bio,
467 async->mirror_num);
468 kfree(async);
469}
470
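/*
 * hand a bio off to the async submission worker threads, and throttle
 * the caller when too many async submits are already in flight.
 */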
471int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
472 int rw, struct bio *bio, int mirror_num,
473 extent_submit_bio_hook_t *submit_bio_hook)
474{
475 struct async_submit_bio *async;
476 int limit = btrfs_async_submit_limit(fs_info);
477
478 async = kmalloc(sizeof(*async), GFP_NOFS);
479 if (!async)
480 return -ENOMEM;
481
482 async->inode = inode;
483 async->rw = rw;
484 async->bio = bio;
485 async->mirror_num = mirror_num;
486 async->submit_bio_hook = submit_bio_hook;
487 async->work.func = run_one_async_submit;
488 async->work.flags = 0;
489 atomic_inc(&fs_info->nr_async_submits);
490 btrfs_queue_worker(&fs_info->workers, &async->work);
491
492 if (atomic_read(&fs_info->nr_async_submits) > limit) {
493 wait_event_timeout(fs_info->async_submit_wait,
494 (atomic_read(&fs_info->nr_async_submits) < limit),
495 HZ/10);
496
497 wait_event_timeout(fs_info->async_submit_wait,
498 (atomic_read(&fs_info->nr_async_bios) < limit),
499 HZ/10);
500 }
501 return 0;
502}
503
504static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
505 int mirror_num)
506{
507 struct btrfs_root *root = BTRFS_I(inode)->root;
508 u64 offset;
509 int ret;
510
511 offset = bio->bi_sector << 9;
512
513 /*
514 * when we're called for a write, we're already in the async
515 * submission context. Just jump into btrfs_map_bio
516 */
517 if (rw & (1 << BIO_RW)) {
518 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
519 mirror_num, 1);
520 }
521
522 /*
523 * called for a read, do the setup so that checksum validation
524 * can happen in the async kernel threads
525 */
526 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
527 BUG_ON(ret);
528
529 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
530}
531
532static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
533 int mirror_num)
534{
535 /*
536 * kthread helpers are used to submit writes so that checksumming
537 * can happen in parallel across all CPUs
538 */
539 if (!(rw & (1 << BIO_RW))) {
540 return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
541 }
542 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
543 inode, rw, bio, mirror_num,
544 __btree_submit_bio_hook);
545}
546
547static int btree_writepage(struct page *page, struct writeback_control *wbc)
548{
549 struct extent_io_tree *tree;
550 tree = &BTRFS_I(page->mapping->host)->io_tree;
551
552 if (current->flags & PF_MEMALLOC) {
553 redirty_page_for_writepage(wbc, page);
554 unlock_page(page);
555 return 0;
556 }
557 return extent_write_full_page(tree, page, btree_get_extent, wbc);
558}
559
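/*
 * background writeback skips the btree unless at least 8MB of dirty
 * metadata has built up (kupdate writeback is ignored entirely);
 * synchronous writeback always goes through.
 */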
560static int btree_writepages(struct address_space *mapping,
561 struct writeback_control *wbc)
562{
563 struct extent_io_tree *tree;
564 tree = &BTRFS_I(mapping->host)->io_tree;
565 if (wbc->sync_mode == WB_SYNC_NONE) {
566 u64 num_dirty;
567 u64 start = 0;
568 unsigned long thresh = 8 * 1024 * 1024;
569
570 if (wbc->for_kupdate)
571 return 0;
572
573 num_dirty = count_range_bits(tree, &start, (u64)-1,
574 thresh, EXTENT_DIRTY);
575 if (num_dirty < thresh) {
576 return 0;
577 }
578 }
579 return extent_writepages(tree, mapping, btree_get_extent, wbc);
580}
581
582int btree_readpage(struct file *file, struct page *page)
583{
584 struct extent_io_tree *tree;
585 tree = &BTRFS_I(page->mapping->host)->io_tree;
586 return extent_read_full_page(tree, page, btree_get_extent);
587}
588
589static int btree_releasepage(struct page *page, gfp_t gfp_flags)
590{
591 struct extent_io_tree *tree;
592 struct extent_map_tree *map;
593 int ret;
594
595 if (PageWriteback(page) || PageDirty(page))
596 return 0;
597
598 tree = &BTRFS_I(page->mapping->host)->io_tree;
599 map = &BTRFS_I(page->mapping->host)->extent_tree;
600
601 ret = try_release_extent_state(map, tree, page, gfp_flags);
602 if (!ret) {
603 return 0;
604 }
605
606 ret = try_release_extent_buffer(tree, page);
607 if (ret == 1) {
608 ClearPagePrivate(page);
609 set_page_private(page, 0);
610 page_cache_release(page);
611 }
612
613 return ret;
614}
615
616static void btree_invalidatepage(struct page *page, unsigned long offset)
617{
618 struct extent_io_tree *tree;
619 tree = &BTRFS_I(page->mapping->host)->io_tree;
620 extent_invalidatepage(tree, page, offset);
621 btree_releasepage(page, GFP_NOFS);
622 if (PagePrivate(page)) {
623 printk("warning page private not zero on page %Lu\n",
624 page_offset(page));
625 ClearPagePrivate(page);
626 set_page_private(page, 0);
627 page_cache_release(page);
628 }
629}
630
631#if 0
632static int btree_writepage(struct page *page, struct writeback_control *wbc)
633{
634 struct buffer_head *bh;
635 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
636 struct buffer_head *head;
637 if (!page_has_buffers(page)) {
638 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
639 (1 << BH_Dirty)|(1 << BH_Uptodate));
640 }
641 head = page_buffers(page);
642 bh = head;
643 do {
644 if (buffer_dirty(bh))
645 csum_tree_block(root, bh, 0);
646 bh = bh->b_this_page;
647 } while (bh != head);
648 return block_write_full_page(page, btree_get_block, wbc);
649}
650#endif
651
652static struct address_space_operations btree_aops = {
653 .readpage = btree_readpage,
654 .writepage = btree_writepage,
655 .writepages = btree_writepages,
656 .releasepage = btree_releasepage,
657 .invalidatepage = btree_invalidatepage,
658 .sync_page = block_sync_page,
659};
660
661int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
662 u64 parent_transid)
663{
664 struct extent_buffer *buf = NULL;
665 struct inode *btree_inode = root->fs_info->btree_inode;
666 int ret = 0;
667
668 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
669 if (!buf)
670 return 0;
671 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
672 buf, 0, 0, btree_get_extent, 0);
673 free_extent_buffer(buf);
674 return ret;
675}
676
677struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
678 u64 bytenr, u32 blocksize)
679{
680 struct inode *btree_inode = root->fs_info->btree_inode;
681 struct extent_buffer *eb;
682 eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
683 bytenr, blocksize, GFP_NOFS);
684 return eb;
685}
686
687struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
688 u64 bytenr, u32 blocksize)
689{
690 struct inode *btree_inode = root->fs_info->btree_inode;
691 struct extent_buffer *eb;
692
693 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
694 bytenr, blocksize, NULL, GFP_NOFS);
695 return eb;
696}
697
698
699int btrfs_write_tree_block(struct extent_buffer *buf)
700{
701 return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
702 buf->start + buf->len - 1, WB_SYNC_NONE);
703}
704
705int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
706{
707 return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
708 buf->start, buf->start + buf->len -1);
709}
710
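/*
 * read a tree block from disk, verifying checksums and the expected
 * parent transid.  On success the buffer is flagged EXTENT_UPTODATE.
 */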
711struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
712 u32 blocksize, u64 parent_transid)
713{
714 struct extent_buffer *buf = NULL;
715 struct inode *btree_inode = root->fs_info->btree_inode;
716 struct extent_io_tree *io_tree;
717 int ret;
718
719 io_tree = &BTRFS_I(btree_inode)->io_tree;
720
721 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
722 if (!buf)
723 return NULL;
724
725 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
726
727 if (ret == 0) {
728 buf->flags |= EXTENT_UPTODATE;
729 } else {
730 WARN_ON(1);
731 }
732 return buf;
733
734}
735
736int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
737 struct extent_buffer *buf)
738{
739 struct inode *btree_inode = root->fs_info->btree_inode;
740 if (btrfs_header_generation(buf) ==
741 root->fs_info->running_transaction->transid) {
742 WARN_ON(!btrfs_tree_locked(buf));
743 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
744 buf);
745 }
746 return 0;
747}
748
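/*
 * initialize the in-memory fields of a btrfs_root that are common to
 * every tree: block sizes, locks, lists and the leaf ref cache.  The
 * root node itself is read separately.
 */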
749static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
750 u32 stripesize, struct btrfs_root *root,
751 struct btrfs_fs_info *fs_info,
752 u64 objectid)
753{
754 root->node = NULL;
755 root->inode = NULL;
756 root->commit_root = NULL;
757 root->ref_tree = NULL;
758 root->sectorsize = sectorsize;
759 root->nodesize = nodesize;
760 root->leafsize = leafsize;
761 root->stripesize = stripesize;
762 root->ref_cows = 0;
763 root->track_dirty = 0;
764
765 root->fs_info = fs_info;
766 root->objectid = objectid;
767 root->last_trans = 0;
768 root->highest_inode = 0;
769 root->last_inode_alloc = 0;
770 root->name = NULL;
771 root->in_sysfs = 0;
772
773 INIT_LIST_HEAD(&root->dirty_list);
774 INIT_LIST_HEAD(&root->orphan_list);
775 INIT_LIST_HEAD(&root->dead_list);
776 spin_lock_init(&root->node_lock);
777 spin_lock_init(&root->list_lock);
778 mutex_init(&root->objectid_mutex);
779 mutex_init(&root->log_mutex);
780
781 btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
782 root->ref_tree = &root->ref_tree_struct;
783
784 memset(&root->root_key, 0, sizeof(root->root_key));
785 memset(&root->root_item, 0, sizeof(root->root_item));
786 memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
787 memset(&root->root_kobj, 0, sizeof(root->root_kobj));
788 root->defrag_trans_start = fs_info->generation;
789 init_completion(&root->kobj_unregister);
790 root->defrag_running = 0;
791 root->defrag_level = 0;
792 root->root_key.objectid = objectid;
793 return 0;
794}
795
796static int find_and_setup_root(struct btrfs_root *tree_root,
797 struct btrfs_fs_info *fs_info,
798 u64 objectid,
799 struct btrfs_root *root)
800{
801 int ret;
802 u32 blocksize;
803
804 __setup_root(tree_root->nodesize, tree_root->leafsize,
805 tree_root->sectorsize, tree_root->stripesize,
806 root, fs_info, objectid);
807 ret = btrfs_find_last_root(tree_root, objectid,
808 &root->root_item, &root->root_key);
809 BUG_ON(ret);
810
811 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
812 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
813 blocksize, 0);
814 BUG_ON(!root->node);
815 return 0;
816}
817
818int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
819 struct btrfs_fs_info *fs_info)
820{
821 struct extent_buffer *eb;
822 int ret;
823
824 if (!fs_info->log_root_tree)
825 return 0;
826
827 eb = fs_info->log_root_tree->node;
828
829 WARN_ON(btrfs_header_level(eb) != 0);
830 WARN_ON(btrfs_header_nritems(eb) != 0);
831
832 ret = btrfs_free_extent(trans, fs_info->tree_root,
833 eb->start, eb->len,
834 BTRFS_TREE_LOG_OBJECTID, 0, 0, 0, 1);
835 BUG_ON(ret);
836
837 free_extent_buffer(eb);
838 kfree(fs_info->log_root_tree);
839 fs_info->log_root_tree = NULL;
840 return 0;
841}
842
843int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
844 struct btrfs_fs_info *fs_info)
845{
846 struct btrfs_root *root;
847 struct btrfs_root *tree_root = fs_info->tree_root;
848
849 root = kzalloc(sizeof(*root), GFP_NOFS);
850 if (!root)
851 return -ENOMEM;
852
853 __setup_root(tree_root->nodesize, tree_root->leafsize,
854 tree_root->sectorsize, tree_root->stripesize,
855 root, fs_info, BTRFS_TREE_LOG_OBJECTID);
856
857 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
858 root->root_key.type = BTRFS_ROOT_ITEM_KEY;
859 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
860 root->ref_cows = 0;
861
862 root->node = btrfs_alloc_free_block(trans, root, root->leafsize,
863 BTRFS_TREE_LOG_OBJECTID,
864 0, 0, 0, 0, 0);
865
866 btrfs_set_header_nritems(root->node, 0);
867 btrfs_set_header_level(root->node, 0);
868 btrfs_set_header_bytenr(root->node, root->node->start);
869 btrfs_set_header_generation(root->node, trans->transid);
870 btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);
871
872 write_extent_buffer(root->node, root->fs_info->fsid,
873 (unsigned long)btrfs_header_fsid(root->node),
874 BTRFS_FSID_SIZE);
875 btrfs_mark_buffer_dirty(root->node);
876 btrfs_tree_unlock(root->node);
877 fs_info->log_root_tree = root;
878 return 0;
879}
880
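/*
 * read a root item out of the tree of tree roots and build the
 * corresponding in-memory root.  The caller is responsible for adding
 * it to the fs_roots radix tree (see btrfs_read_fs_root_no_name).
 */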
881struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
882 struct btrfs_key *location)
883{
884 struct btrfs_root *root;
885 struct btrfs_fs_info *fs_info = tree_root->fs_info;
886 struct btrfs_path *path;
887 struct extent_buffer *l;
888 u64 highest_inode;
889 u32 blocksize;
890 int ret = 0;
891
892 root = kzalloc(sizeof(*root), GFP_NOFS);
893 if (!root)
894 return ERR_PTR(-ENOMEM);
895 if (location->offset == (u64)-1) {
896 ret = find_and_setup_root(tree_root, fs_info,
897 location->objectid, root);
898 if (ret) {
899 kfree(root);
900 return ERR_PTR(ret);
901 }
902 goto insert;
903 }
904
905 __setup_root(tree_root->nodesize, tree_root->leafsize,
906 tree_root->sectorsize, tree_root->stripesize,
907 root, fs_info, location->objectid);
908
909 path = btrfs_alloc_path();
910 BUG_ON(!path);
911 ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
912 if (ret != 0) {
913 if (ret > 0)
914 ret = -ENOENT;
915 goto out;
916 }
917 l = path->nodes[0];
918 read_extent_buffer(l, &root->root_item,
919 btrfs_item_ptr_offset(l, path->slots[0]),
920 sizeof(root->root_item));
921 memcpy(&root->root_key, location, sizeof(*location));
922 ret = 0;
923out:
924 btrfs_release_path(root, path);
925 btrfs_free_path(path);
926 if (ret) {
927 kfree(root);
928 return ERR_PTR(ret);
929 }
930 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
931 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
932 blocksize, 0);
933 BUG_ON(!root->node);
934insert:
935 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
936 root->ref_cows = 1;
937 ret = btrfs_find_highest_inode(root, &highest_inode);
938 if (ret == 0) {
939 root->highest_inode = highest_inode;
940 root->last_inode_alloc = highest_inode;
941 }
942 }
943 return root;
944}
945
946struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
947 u64 root_objectid)
948{
949 struct btrfs_root *root;
950
951 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
952 return fs_info->tree_root;
953 if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
954 return fs_info->extent_root;
955
956 root = radix_tree_lookup(&fs_info->fs_roots_radix,
957 (unsigned long)root_objectid);
958 return root;
959}
960
961struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
962 struct btrfs_key *location)
963{
964 struct btrfs_root *root;
965 int ret;
966
967 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
968 return fs_info->tree_root;
969 if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
970 return fs_info->extent_root;
971 if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
972 return fs_info->chunk_root;
973 if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
974 return fs_info->dev_root;
975
976 root = radix_tree_lookup(&fs_info->fs_roots_radix,
977 (unsigned long)location->objectid);
978 if (root)
979 return root;
980
981 root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
982 if (IS_ERR(root))
983 return root;
984 ret = radix_tree_insert(&fs_info->fs_roots_radix,
985 (unsigned long)root->root_key.objectid,
986 root);
987 if (ret) {
988 free_extent_buffer(root->node);
989 kfree(root);
990 return ERR_PTR(ret);
991 }
992 ret = btrfs_find_dead_roots(fs_info->tree_root,
993 root->root_key.objectid, root);
994 BUG_ON(ret);
995
996 return root;
997}
998
999struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1000 struct btrfs_key *location,
1001 const char *name, int namelen)
1002{
1003 struct btrfs_root *root;
1004 int ret;
1005
1006 root = btrfs_read_fs_root_no_name(fs_info, location);
1007 if (!root)
1008 return NULL;
1009
1010 if (root->in_sysfs)
1011 return root;
1012
1013 ret = btrfs_set_root_name(root, name, namelen);
1014 if (ret) {
1015 free_extent_buffer(root->node);
1016 kfree(root);
1017 return ERR_PTR(ret);
1018 }
1019
1020 ret = btrfs_sysfs_add_root(root);
1021 if (ret) {
1022 free_extent_buffer(root->node);
1023 kfree(root->name);
1024 kfree(root);
1025 return ERR_PTR(ret);
1026 }
1027 root->in_sysfs = 1;
1028 return root;
1029}
1030#if 0
1031static int add_hasher(struct btrfs_fs_info *info, char *type) {
1032 struct btrfs_hasher *hasher;
1033
1034 hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
1035 if (!hasher)
1036 return -ENOMEM;
1037 hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
1038 if (!hasher->hash_tfm) {
1039 kfree(hasher);
1040 return -EINVAL;
1041 }
1042 spin_lock(&info->hash_lock);
1043 list_add(&hasher->list, &info->hashers);
1044 spin_unlock(&info->hash_lock);
1045 return 0;
1046}
1047#endif
1048
1049static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1050{
1051 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1052 int ret = 0;
1053 struct list_head *cur;
1054 struct btrfs_device *device;
1055 struct backing_dev_info *bdi;
1056
1057 if ((bdi_bits & (1 << BDI_write_congested)) &&
1058 btrfs_congested_async(info, 0))
1059 return 1;
1060
1061 list_for_each(cur, &info->fs_devices->devices) {
1062 device = list_entry(cur, struct btrfs_device, dev_list);
1063 if (!device->bdev)
1064 continue;
1065 bdi = blk_get_backing_dev_info(device->bdev);
1066 if (bdi && bdi_congested(bdi, bdi_bits)) {
1067 ret = 1;
1068 break;
1069 }
1070 }
1071 return ret;
1072}
1073
1074/*
1075 * this unplugs every device on the box, and it is only used when page
1076 * is null
1077 */
1078static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1079{
1080 struct list_head *cur;
1081 struct btrfs_device *device;
1082 struct btrfs_fs_info *info;
1083
1084 info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1085 list_for_each(cur, &info->fs_devices->devices) {
1086 device = list_entry(cur, struct btrfs_device, dev_list);
1087 bdi = blk_get_backing_dev_info(device->bdev);
1088 if (bdi->unplug_io_fn) {
1089 bdi->unplug_io_fn(bdi, page);
1090 }
1091 }
1092}
1093
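/*
 * unplug only the device(s) that actually hold the page being waited
 * on.  The page offset is mapped through the extent tree to a logical
 * block and then through the chunk mapping to the right device queue.
 */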
1094void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1095{
1096 struct inode *inode;
1097 struct extent_map_tree *em_tree;
1098 struct extent_map *em;
1099 struct address_space *mapping;
1100 u64 offset;
1101
1102 /* the generic O_DIRECT read code does this */
1103 if (!page) {
1104 __unplug_io_fn(bdi, page);
1105 return;
1106 }
1107
1108 /*
1109 * page->mapping may change at any time. Get a consistent copy
1110 * and use that for everything below
1111 */
1112 smp_mb();
1113 mapping = page->mapping;
1114 if (!mapping)
1115 return;
1116
1117 inode = mapping->host;
1118 offset = page_offset(page);
1119
1120 em_tree = &BTRFS_I(inode)->extent_tree;
1121 spin_lock(&em_tree->lock);
1122 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1123 spin_unlock(&em_tree->lock);
1124 if (!em) {
1125 __unplug_io_fn(bdi, page);
1126 return;
1127 }
1128
1129 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1130 free_extent_map(em);
1131 __unplug_io_fn(bdi, page);
1132 return;
1133 }
1134 offset = offset - em->start;
1135 btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1136 em->block_start + offset, page);
1137 free_extent_map(em);
1138}
1139
1140static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1141{
1142#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1143 bdi_init(bdi);
1144#endif
1145 bdi->ra_pages = default_backing_dev_info.ra_pages;
1146 bdi->state = 0;
1147 bdi->capabilities = default_backing_dev_info.capabilities;
1148 bdi->unplug_io_fn = btrfs_unplug_io_fn;
1149 bdi->unplug_io_data = info;
1150 bdi->congested_fn = btrfs_congested_fn;
1151 bdi->congested_data = info;
1152 return 0;
1153}
1154
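/*
 * metadata checksums cover an entire tree block, which may span more
 * pages than a single bio.  Return 1 only once every page of the block
 * touched by this bio is available and up to date, so the csum can run.
 */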
1155static int bio_ready_for_csum(struct bio *bio)
1156{
1157 u64 length = 0;
1158 u64 buf_len = 0;
1159 u64 start = 0;
1160 struct page *page;
1161 struct extent_io_tree *io_tree = NULL;
1162 struct btrfs_fs_info *info = NULL;
1163 struct bio_vec *bvec;
1164 int i;
1165 int ret;
1166
1167 bio_for_each_segment(bvec, bio, i) {
1168 page = bvec->bv_page;
1169 if (page->private == EXTENT_PAGE_PRIVATE) {
1170 length += bvec->bv_len;
1171 continue;
1172 }
1173 if (!page->private) {
1174 length += bvec->bv_len;
1175 continue;
1176 }
1177 length = bvec->bv_len;
1178 buf_len = page->private >> 2;
1179 start = page_offset(page) + bvec->bv_offset;
1180 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1181 info = BTRFS_I(page->mapping->host)->root->fs_info;
1182 }
1183 /* are we fully contained in this bio? */
1184 if (buf_len <= length)
1185 return 1;
1186
1187 ret = extent_range_uptodate(io_tree, start + length,
1188 start + buf_len - 1);
1189 if (ret == 1)
1190 return ret;
1191 return ret;
1192}
1193
1194/*
1195 * called by the kthread helper functions to finally call the bio end_io
1196 * functions. This is where read checksum verification actually happens
1197 */
1198static void end_workqueue_fn(struct btrfs_work *work)
1199{
1200 struct bio *bio;
1201 struct end_io_wq *end_io_wq;
1202 struct btrfs_fs_info *fs_info;
1203 int error;
1204
1205 end_io_wq = container_of(work, struct end_io_wq, work);
1206 bio = end_io_wq->bio;
1207 fs_info = end_io_wq->info;
1208
1209 /* metadata bios are special because the whole tree block must
1210 * be checksummed at once. This makes sure the entire block is in
1211 * ram and up to date before trying to verify things. For
1212 * blocksize <= pagesize, it is basically a noop
1213 */
1214 if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
1215 btrfs_queue_worker(&fs_info->endio_workers,
1216 &end_io_wq->work);
1217 return;
1218 }
1219 error = end_io_wq->error;
1220 bio->bi_private = end_io_wq->private;
1221 bio->bi_end_io = end_io_wq->end_io;
1222 kfree(end_io_wq);
1223#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1224 bio_endio(bio, bio->bi_size, error);
1225#else
1226 bio_endio(bio, error);
1227#endif
1228}
1229
1230static int cleaner_kthread(void *arg)
1231{
1232 struct btrfs_root *root = arg;
1233
1234 do {
1235 smp_mb();
1236 if (root->fs_info->closing)
1237 break;
1238
1239 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1240 mutex_lock(&root->fs_info->cleaner_mutex);
1241 btrfs_clean_old_snapshots(root);
1242 mutex_unlock(&root->fs_info->cleaner_mutex);
1243
1244 if (freezing(current)) {
1245 refrigerator();
1246 } else {
1247 smp_mb();
1248 if (root->fs_info->closing)
1249 break;
1250 set_current_state(TASK_INTERRUPTIBLE);
1251 schedule();
1252 __set_current_state(TASK_RUNNING);
1253 }
1254 } while (!kthread_should_stop());
1255 return 0;
1256}
1257
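/*
 * background thread that forces a transaction commit roughly every 30
 * seconds while the FS is mounted, and kicks the cleaner afterwards.
 */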
1258static int transaction_kthread(void *arg)
1259{
1260 struct btrfs_root *root = arg;
1261 struct btrfs_trans_handle *trans;
1262 struct btrfs_transaction *cur;
1263 unsigned long now;
1264 unsigned long delay;
1265 int ret;
1266
1267 do {
1268 smp_mb();
1269 if (root->fs_info->closing)
1270 break;
1271
1272 delay = HZ * 30;
1273 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1274 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1275
1276 if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
1277 printk("btrfs: total reference cache size %Lu\n",
1278 root->fs_info->total_ref_cache_size);
1279 }
1280
1281 mutex_lock(&root->fs_info->trans_mutex);
1282 cur = root->fs_info->running_transaction;
1283 if (!cur) {
1284 mutex_unlock(&root->fs_info->trans_mutex);
1285 goto sleep;
1286 }
1287
1288 now = get_seconds();
1289 if (now < cur->start_time || now - cur->start_time < 30) {
1290 mutex_unlock(&root->fs_info->trans_mutex);
1291 delay = HZ * 5;
1292 goto sleep;
1293 }
1294 mutex_unlock(&root->fs_info->trans_mutex);
1295 trans = btrfs_start_transaction(root, 1);
1296 ret = btrfs_commit_transaction(trans, root);
1297sleep:
1298 wake_up_process(root->fs_info->cleaner_kthread);
1299 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1300
1301 if (freezing(current)) {
1302 refrigerator();
1303 } else {
1304 if (root->fs_info->closing)
1305 break;
1306 set_current_state(TASK_INTERRUPTIBLE);
1307 schedule_timeout(delay);
1308 __set_current_state(TASK_RUNNING);
1309 }
1310 } while (!kthread_should_stop());
1311 return 0;
1312}
1313
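/*
 * open_ctree is the main mount path: read the super block, start the
 * worker thread pools, read the chunk, root, extent and device trees,
 * replay the tree log if one is present, and finally start the cleaner
 * and transaction kthreads.
 */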
1314struct btrfs_root *open_ctree(struct super_block *sb,
1315 struct btrfs_fs_devices *fs_devices,
1316 char *options)
1317{
1318 u32 sectorsize;
1319 u32 nodesize;
1320 u32 leafsize;
1321 u32 blocksize;
1322 u32 stripesize;
1323 struct buffer_head *bh;
1324 struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1325 GFP_NOFS);
1326 struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1327 GFP_NOFS);
1328 struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1329 GFP_NOFS);
1330 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1331 GFP_NOFS);
1332 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1333 GFP_NOFS);
1334 struct btrfs_root *log_tree_root;
1335
1336 int ret;
1337 int err = -EINVAL;
1338
1339 struct btrfs_super_block *disk_super;
1340
1341 if (!extent_root || !tree_root || !fs_info) {
1342 err = -ENOMEM;
1343 goto fail;
1344 }
1345 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
1346 INIT_LIST_HEAD(&fs_info->trans_list);
1347 INIT_LIST_HEAD(&fs_info->dead_roots);
1348 INIT_LIST_HEAD(&fs_info->hashers);
1349 INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1350 spin_lock_init(&fs_info->hash_lock);
1351 spin_lock_init(&fs_info->delalloc_lock);
1352 spin_lock_init(&fs_info->new_trans_lock);
1353 spin_lock_init(&fs_info->ref_cache_lock);
1354
1355 init_completion(&fs_info->kobj_unregister);
1356 fs_info->tree_root = tree_root;
1357 fs_info->extent_root = extent_root;
1358 fs_info->chunk_root = chunk_root;
1359 fs_info->dev_root = dev_root;
1360 fs_info->fs_devices = fs_devices;
1361 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1362 INIT_LIST_HEAD(&fs_info->space_info);
1363 btrfs_mapping_init(&fs_info->mapping_tree);
1364 atomic_set(&fs_info->nr_async_submits, 0);
1365 atomic_set(&fs_info->nr_async_bios, 0);
1366 atomic_set(&fs_info->throttles, 0);
1367 atomic_set(&fs_info->throttle_gen, 0);
1368 fs_info->sb = sb;
1369 fs_info->max_extent = (u64)-1;
1370 fs_info->max_inline = 8192 * 1024;
1371 setup_bdi(fs_info, &fs_info->bdi);
1372 fs_info->btree_inode = new_inode(sb);
1373 fs_info->btree_inode->i_ino = 1;
1374 fs_info->btree_inode->i_nlink = 1;
1375 fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
1376
1377 INIT_LIST_HEAD(&fs_info->ordered_extents);
1378 spin_lock_init(&fs_info->ordered_extent_lock);
1379
1380 sb->s_blocksize = 4096;
1381 sb->s_blocksize_bits = blksize_bits(4096);
1382
1383 /*
1384 * we set the i_size on the btree inode to the max possible int.
1385 * the real end of the address space is determined by all of
1386 * the devices in the system
1387 */
1388 fs_info->btree_inode->i_size = OFFSET_MAX;
1389 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1390 fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1391
1392 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1393 fs_info->btree_inode->i_mapping,
1394 GFP_NOFS);
1395 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1396 GFP_NOFS);
1397
1398 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1399
1400 extent_io_tree_init(&fs_info->free_space_cache,
1401 fs_info->btree_inode->i_mapping, GFP_NOFS);
1402 extent_io_tree_init(&fs_info->block_group_cache,
1403 fs_info->btree_inode->i_mapping, GFP_NOFS);
1404 extent_io_tree_init(&fs_info->pinned_extents,
1405 fs_info->btree_inode->i_mapping, GFP_NOFS);
1406 extent_io_tree_init(&fs_info->pending_del,
1407 fs_info->btree_inode->i_mapping, GFP_NOFS);
1408 extent_io_tree_init(&fs_info->extent_ins,
1409 fs_info->btree_inode->i_mapping, GFP_NOFS);
1410 fs_info->do_barriers = 1;
1411
1412 BTRFS_I(fs_info->btree_inode)->root = tree_root;
1413 memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1414 sizeof(struct btrfs_key));
1415 insert_inode_hash(fs_info->btree_inode);
1416 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1417
1418 mutex_init(&fs_info->trans_mutex);
1419 mutex_init(&fs_info->tree_log_mutex);
1420 mutex_init(&fs_info->drop_mutex);
1421 mutex_init(&fs_info->alloc_mutex);
1422 mutex_init(&fs_info->chunk_mutex);
1423 mutex_init(&fs_info->transaction_kthread_mutex);
1424 mutex_init(&fs_info->cleaner_mutex);
1425 mutex_init(&fs_info->volume_mutex);
1426 init_waitqueue_head(&fs_info->transaction_throttle);
1427 init_waitqueue_head(&fs_info->transaction_wait);
1428 init_waitqueue_head(&fs_info->async_submit_wait);
1429 init_waitqueue_head(&fs_info->tree_log_wait);
1430 atomic_set(&fs_info->tree_log_commit, 0);
1431 atomic_set(&fs_info->tree_log_writers, 0);
1432 fs_info->tree_log_transid = 0;
1433
1434#if 0
1435 ret = add_hasher(fs_info, "crc32c");
1436 if (ret) {
1437 printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
1438 err = -ENOMEM;
1439 goto fail_iput;
1440 }
1441#endif
1442 __setup_root(4096, 4096, 4096, 4096, tree_root,
1443 fs_info, BTRFS_ROOT_TREE_OBJECTID);
1444
1445
1446 bh = __bread(fs_devices->latest_bdev,
1447 BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
1448 if (!bh)
1449 goto fail_iput;
1450
1451 memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1452 brelse(bh);
1453
1454 memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1455
1456 disk_super = &fs_info->super_copy;
1457 if (!btrfs_super_root(disk_super))
1458 goto fail_sb_buffer;
1459
1460 err = btrfs_parse_options(tree_root, options);
1461 if (err)
1462 goto fail_sb_buffer;
1463
1464 /*
1465 * we need to start all the end_io workers up front because the
1466 * queue work function gets called at interrupt time, and so it
1467 * cannot dynamically grow.
1468 */
1469 btrfs_init_workers(&fs_info->workers, "worker",
1470 fs_info->thread_pool_size);
1471 btrfs_init_workers(&fs_info->submit_workers, "submit",
1472 min_t(u64, fs_devices->num_devices,
1473 fs_info->thread_pool_size));
1474
1475 /* a higher idle thresh on the submit workers makes it much more
1476 * likely that bios will be sent down in a sane order to the
1477 * devices
1478 */
1479 fs_info->submit_workers.idle_thresh = 64;
1480
1481 /* fs_info->workers is responsible for checksumming file data
1482 * blocks and metadata. Using a larger idle thresh allows each
1483 * worker thread to operate on things in roughly the order they
1484 * were sent by the writeback daemons, improving overall locality
1485 * of the IO going down the pipe.
1486 */
1487 fs_info->workers.idle_thresh = 128;
1488
1489 btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
1490 btrfs_init_workers(&fs_info->endio_workers, "endio",
1491 fs_info->thread_pool_size);
1492 btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1493 fs_info->thread_pool_size);
1494
1495 /*
1496 * endios are largely parallel and should have a very
1497 * low idle thresh
1498 */
1499 fs_info->endio_workers.idle_thresh = 4;
1500 fs_info->endio_write_workers.idle_thresh = 4;
1501
1502 btrfs_start_workers(&fs_info->workers, 1);
1503 btrfs_start_workers(&fs_info->submit_workers, 1);
1504 btrfs_start_workers(&fs_info->fixup_workers, 1);
1505 btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1506 btrfs_start_workers(&fs_info->endio_write_workers,
1507 fs_info->thread_pool_size);
1508
1509 err = -EINVAL;
1510 if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
1511 printk("Btrfs: wanted %llu devices, but found %llu\n",
1512 (unsigned long long)btrfs_super_num_devices(disk_super),
1513 (unsigned long long)fs_devices->open_devices);
1514 if (btrfs_test_opt(tree_root, DEGRADED))
1515 printk("continuing in degraded mode\n");
1516 else {
1517 goto fail_sb_buffer;
1518 }
1519 }
1520
1521 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1522
1523 nodesize = btrfs_super_nodesize(disk_super);
1524 leafsize = btrfs_super_leafsize(disk_super);
1525 sectorsize = btrfs_super_sectorsize(disk_super);
1526 stripesize = btrfs_super_stripesize(disk_super);
1527 tree_root->nodesize = nodesize;
1528 tree_root->leafsize = leafsize;
1529 tree_root->sectorsize = sectorsize;
1530 tree_root->stripesize = stripesize;
1531
1532 sb->s_blocksize = sectorsize;
1533 sb->s_blocksize_bits = blksize_bits(sectorsize);
1534
1535 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1536 sizeof(disk_super->magic))) {
1537 printk("btrfs: valid FS not found on %s\n", sb->s_id);
1538 goto fail_sb_buffer;
1539 }
1540
1541 mutex_lock(&fs_info->chunk_mutex);
1542 ret = btrfs_read_sys_array(tree_root);
1543 mutex_unlock(&fs_info->chunk_mutex);
1544 if (ret) {
1545 printk("btrfs: failed to read the system array on %s\n",
1546 sb->s_id);
1547 goto fail_sys_array;
1548 }
1549
1550 blocksize = btrfs_level_size(tree_root,
1551 btrfs_super_chunk_root_level(disk_super));
1552
1553 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1554 chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1555
1556 chunk_root->node = read_tree_block(chunk_root,
1557 btrfs_super_chunk_root(disk_super),
1558 blocksize, 0);
1559 BUG_ON(!chunk_root->node);
1560
1561 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1562 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1563 BTRFS_UUID_SIZE);
1564
1565 mutex_lock(&fs_info->chunk_mutex);
1566 ret = btrfs_read_chunk_tree(chunk_root);
1567 mutex_unlock(&fs_info->chunk_mutex);
1568 BUG_ON(ret);
1569
1570 btrfs_close_extra_devices(fs_devices);
1571
1572 blocksize = btrfs_level_size(tree_root,
1573 btrfs_super_root_level(disk_super));
1574
1575
1576 tree_root->node = read_tree_block(tree_root,
1577 btrfs_super_root(disk_super),
1578 blocksize, 0);
1579 if (!tree_root->node)
1580 goto fail_sb_buffer;
1581
1582
1583 ret = find_and_setup_root(tree_root, fs_info,
1584 BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1585 if (ret)
1586 goto fail_tree_root;
1587 extent_root->track_dirty = 1;
1588
1589 ret = find_and_setup_root(tree_root, fs_info,
1590 BTRFS_DEV_TREE_OBJECTID, dev_root);
1591 dev_root->track_dirty = 1;
1592
1593 if (ret)
1594 goto fail_extent_root;
1595
1596 btrfs_read_block_groups(extent_root);
1597
1598 fs_info->generation = btrfs_super_generation(disk_super) + 1;
1599 fs_info->data_alloc_profile = (u64)-1;
1600 fs_info->metadata_alloc_profile = (u64)-1;
1601 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1602 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1603 "btrfs-cleaner");
1604 if (!fs_info->cleaner_kthread)
1605 goto fail_extent_root;
1606
1607 fs_info->transaction_kthread = kthread_run(transaction_kthread,
1608 tree_root,
1609 "btrfs-transaction");
1610 if (!fs_info->transaction_kthread)
1611 goto fail_cleaner;
1612
1613 if (btrfs_super_log_root(disk_super) != 0) {
1614 u32 blocksize;
1615 u64 bytenr = btrfs_super_log_root(disk_super);
1616
1617 blocksize =
1618 btrfs_level_size(tree_root,
1619 btrfs_super_log_root_level(disk_super));
1620
1621 log_tree_root = kzalloc(sizeof(struct btrfs_root),
1622 GFP_NOFS);
1623
1624 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1625 log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1626
1627 log_tree_root->node = read_tree_block(tree_root, bytenr,
1628 blocksize, 0);
1629 ret = btrfs_recover_log_trees(log_tree_root);
1630 BUG_ON(ret);
1631 }
1632 fs_info->last_trans_committed = btrfs_super_generation(disk_super);
1633 return tree_root;
1634
1635fail_cleaner:
1636 kthread_stop(fs_info->cleaner_kthread);
1637fail_extent_root:
1638 free_extent_buffer(extent_root->node);
1639fail_tree_root:
1640 free_extent_buffer(tree_root->node);
1641fail_sys_array:
1642fail_sb_buffer:
1643 btrfs_stop_workers(&fs_info->fixup_workers);
1644 btrfs_stop_workers(&fs_info->workers);
1645 btrfs_stop_workers(&fs_info->endio_workers);
1646 btrfs_stop_workers(&fs_info->endio_write_workers);
1647 btrfs_stop_workers(&fs_info->submit_workers);
1648fail_iput:
1649 iput(fs_info->btree_inode);
1650fail:
1651 btrfs_close_devices(fs_info->fs_devices);
1652 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1653
1654 kfree(extent_root);
1655 kfree(tree_root);
1656#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1657 bdi_destroy(&fs_info->bdi);
1658#endif
1659 kfree(fs_info);
1660 return ERR_PTR(err);
1661}
1662
1663static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1664{
1665 char b[BDEVNAME_SIZE];
1666
1667 if (uptodate) {
1668 set_buffer_uptodate(bh);
1669 } else {
1670 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1671 printk(KERN_WARNING "lost page write due to "
1672 "I/O error on %s\n",
1673 bdevname(bh->b_bdev, b));
1674 }
1675 /* note, we don't set_buffer_write_io_error because we have
1676 * our own ways of dealing with the IO errors
1677 */
1678 clear_buffer_uptodate(bh);
1679 }
1680 unlock_buffer(bh);
1681 put_bh(bh);
1682}
1683
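/*
 * write the super block to every device in the FS, using write
 * barriers when they are enabled and the device supports them.  Up to
 * (number of devices - 1) write failures are tolerated before BUG().
 */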
1684int write_all_supers(struct btrfs_root *root)
1685{
1686 struct list_head *cur;
1687 struct list_head *head = &root->fs_info->fs_devices->devices;
1688 struct btrfs_device *dev;
1689 struct btrfs_super_block *sb;
1690 struct btrfs_dev_item *dev_item;
1691 struct buffer_head *bh;
1692 int ret;
1693 int do_barriers;
1694 int max_errors;
1695 int total_errors = 0;
1696 u32 crc;
1697 u64 flags;
1698
1699 max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1700 do_barriers = !btrfs_test_opt(root, NOBARRIER);
1701
1702 sb = &root->fs_info->super_for_commit;
1703 dev_item = &sb->dev_item;
1704 list_for_each(cur, head) {
1705 dev = list_entry(cur, struct btrfs_device, dev_list);
1706 if (!dev->bdev) {
1707 total_errors++;
1708 continue;
1709 }
1710 if (!dev->in_fs_metadata)
1711 continue;
1712
1713 btrfs_set_stack_device_type(dev_item, dev->type);
1714 btrfs_set_stack_device_id(dev_item, dev->devid);
1715 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
1716 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
1717 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
1718 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
1719 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
1720 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
1721 flags = btrfs_super_flags(sb);
1722 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
1723
1724
1725 crc = ~(u32)0;
1726 crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
1727 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
1728 btrfs_csum_final(crc, sb->csum);
1729
1730 bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
1731 BTRFS_SUPER_INFO_SIZE);
1732
1733 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
1734 dev->pending_io = bh;
1735
1736 get_bh(bh);
1737 set_buffer_uptodate(bh);
1738 lock_buffer(bh);
1739 bh->b_end_io = btrfs_end_buffer_write_sync;
1740
1741 if (do_barriers && dev->barriers) {
1742 ret = submit_bh(WRITE_BARRIER, bh);
1743 if (ret == -EOPNOTSUPP) {
1744 printk("btrfs: disabling barriers on dev %s\n",
1745 dev->name);
1746 set_buffer_uptodate(bh);
1747 dev->barriers = 0;
1748 get_bh(bh);
1749 lock_buffer(bh);
1750 ret = submit_bh(WRITE, bh);
1751 }
1752 } else {
1753 ret = submit_bh(WRITE, bh);
1754 }
1755 if (ret)
1756 total_errors++;
1757 }
1758 if (total_errors > max_errors) {
1759 printk("btrfs: %d errors while writing supers\n", total_errors);
1760 BUG();
1761 }
1762 total_errors = 0;
1763
1764 list_for_each(cur, head) {
1765 dev = list_entry(cur, struct btrfs_device, dev_list);
1766 if (!dev->bdev)
1767 continue;
1768 if (!dev->in_fs_metadata)
1769 continue;
1770
1771 BUG_ON(!dev->pending_io);
1772 bh = dev->pending_io;
1773 wait_on_buffer(bh);
1774 if (!buffer_uptodate(dev->pending_io)) {
1775 if (do_barriers && dev->barriers) {
1776 printk("btrfs: disabling barriers on dev %s\n",
1777 dev->name);
1778 set_buffer_uptodate(bh);
1779 get_bh(bh);
1780 lock_buffer(bh);
1781 dev->barriers = 0;
1782 ret = submit_bh(WRITE, bh);
1783 BUG_ON(ret);
1784 wait_on_buffer(bh);
1785 if (!buffer_uptodate(bh))
1786 total_errors++;
1787 } else {
1788 total_errors++;
1789 }
1790
1791 }
1792 dev->pending_io = NULL;
1793 brelse(bh);
1794 }
1795 if (total_errors > max_errors) {
1796 printk("btrfs: %d errors while writing supers\n", total_errors);
1797 BUG();
1798 }
1799 return 0;
1800}
1801
1802int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
1803 *root)
1804{
1805 int ret;
1806
1807 ret = write_all_supers(root);
1808 return ret;
1809}
1810
1811int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
1812{
1813 radix_tree_delete(&fs_info->fs_roots_radix,
1814 (unsigned long)root->root_key.objectid);
1815 if (root->in_sysfs)
1816 btrfs_sysfs_del_root(root);
1817 if (root->inode)
1818 iput(root->inode);
1819 if (root->node)
1820 free_extent_buffer(root->node);
1821 if (root->commit_root)
1822 free_extent_buffer(root->commit_root);
1823 if (root->name)
1824 kfree(root->name);
1825 kfree(root);
1826 return 0;
1827}
1828
1829static int del_fs_roots(struct btrfs_fs_info *fs_info)
1830{
1831 int ret;
1832 struct btrfs_root *gang[8];
1833 int i;
1834
1835 while(1) {
1836 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1837 (void **)gang, 0,
1838 ARRAY_SIZE(gang));
1839 if (!ret)
1840 break;
1841 for (i = 0; i < ret; i++)
1842 btrfs_free_fs_root(fs_info, gang[i]);
1843 }
1844 return 0;
1845}
1846
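/*
 * unmount path: stop the background kthreads, commit any remaining
 * work, write the super blocks, then tear down roots, block groups and
 * the worker threads.
 */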
1847int close_ctree(struct btrfs_root *root)
1848{
1849 int ret;
1850 struct btrfs_trans_handle *trans;
1851 struct btrfs_fs_info *fs_info = root->fs_info;
1852
1853 fs_info->closing = 1;
1854 smp_mb();
1855
1856 kthread_stop(root->fs_info->transaction_kthread);
1857 kthread_stop(root->fs_info->cleaner_kthread);
1858
1859 btrfs_clean_old_snapshots(root);
1860 trans = btrfs_start_transaction(root, 1);
1861 ret = btrfs_commit_transaction(trans, root);
1862 /* run commit again to drop the original snapshot */
1863 trans = btrfs_start_transaction(root, 1);
1864 btrfs_commit_transaction(trans, root);
1865 ret = btrfs_write_and_wait_transaction(NULL, root);
1866 BUG_ON(ret);
1867
1868 write_ctree_super(NULL, root);
1869
1870 if (fs_info->delalloc_bytes) {
1871 printk("btrfs: at unmount delalloc count %Lu\n",
1872 fs_info->delalloc_bytes);
1873 }
1874 if (fs_info->total_ref_cache_size) {
1875 printk("btrfs: at umount reference cache size %Lu\n",
1876 fs_info->total_ref_cache_size);
1877 }
1878
1879 if (fs_info->extent_root->node)
1880 free_extent_buffer(fs_info->extent_root->node);
1881
1882 if (fs_info->tree_root->node)
1883 free_extent_buffer(fs_info->tree_root->node);
1884
1885 if (root->fs_info->chunk_root->node)
1886 free_extent_buffer(root->fs_info->chunk_root->node);
1887
1888 if (root->fs_info->dev_root->node)
1889 free_extent_buffer(root->fs_info->dev_root->node);
1890
1891 btrfs_free_block_groups(root->fs_info);
1892 fs_info->closing = 2;
1893 del_fs_roots(fs_info);
1894
1895 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1896
1897 truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
1898
1899 btrfs_stop_workers(&fs_info->fixup_workers);
1900 btrfs_stop_workers(&fs_info->workers);
1901 btrfs_stop_workers(&fs_info->endio_workers);
1902 btrfs_stop_workers(&fs_info->endio_write_workers);
1903 btrfs_stop_workers(&fs_info->submit_workers);
1904
1905 iput(fs_info->btree_inode);
1906#if 0
1907 while(!list_empty(&fs_info->hashers)) {
1908 struct btrfs_hasher *hasher;
1909 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
1910 hashers);
1911 list_del(&hasher->hashers);
1912 crypto_free_hash(&fs_info->hash_tfm);
1913 kfree(hasher);
1914 }
1915#endif
1916 btrfs_close_devices(fs_info->fs_devices);
1917 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1918
1919#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1920 bdi_destroy(&fs_info->bdi);
1921#endif
1922
1923 kfree(fs_info->extent_root);
1924 kfree(fs_info->tree_root);
1925 kfree(fs_info->chunk_root);
1926 kfree(fs_info->dev_root);
1927 return 0;
1928}
1929
1930int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
1931{
1932 int ret;
1933 struct inode *btree_inode = buf->first_page->mapping->host;
1934
1935 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
1936 if (!ret)
1937 return ret;
1938
1939 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
1940 parent_transid);
1941 return !ret;
1942}
1943
1944int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
1945{
1946 struct inode *btree_inode = buf->first_page->mapping->host;
1947 return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
1948 buf);
1949}
1950
1951void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
1952{
1953 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1954 u64 transid = btrfs_header_generation(buf);
1955 struct inode *btree_inode = root->fs_info->btree_inode;
1956
1957 WARN_ON(!btrfs_tree_locked(buf));
1958 if (transid != root->fs_info->generation) {
1959 printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
1960 (unsigned long long)buf->start,
1961 transid, root->fs_info->generation);
1962 WARN_ON(1);
1963 }
1964 set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
1965}
1966
1967void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
1968{
1969 /*
1970 * looks as though older kernels can get into trouble with
1971 * this code; they end up stuck in balance_dirty_pages forever
1972 */
1973 struct extent_io_tree *tree;
1974 u64 num_dirty;
1975 u64 start = 0;
1976 unsigned long thresh = 96 * 1024 * 1024;
1977 tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
1978
1979 if (current_is_pdflush() || current->flags & PF_MEMALLOC)
1980 return;
1981
1982 num_dirty = count_range_bits(tree, &start, (u64)-1,
1983 thresh, EXTENT_DIRTY);
1984 if (num_dirty > thresh) {
1985 balance_dirty_pages_ratelimited_nr(
1986 root->fs_info->btree_inode->i_mapping, 1);
1987 }
1988 return;
1989}
1990
1991int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
1992{
1993 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1994 int ret;
1995 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1996 if (ret == 0) {
1997 buf->flags |= EXTENT_UPTODATE;
1998 }
1999 return ret;
2000}
2001
2002int btree_lock_page_hook(struct page *page)
2003{
2004 struct inode *inode = page->mapping->host;
2005 struct btrfs_root *root = BTRFS_I(inode)->root;
2006 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2007 struct extent_buffer *eb;
2008 unsigned long len;
2009 u64 bytenr = page_offset(page);
2010
2011 if (page->private == EXTENT_PAGE_PRIVATE)
2012 goto out;
2013
2014 len = page->private >> 2;
2015 eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2016 if (!eb)
2017 goto out;
2018
2019 btrfs_tree_lock(eb);
2020 spin_lock(&root->fs_info->hash_lock);
2021 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2022 spin_unlock(&root->fs_info->hash_lock);
2023 btrfs_tree_unlock(eb);
2024 free_extent_buffer(eb);
2025out:
2026 lock_page(page);
2027 return 0;
2028}
2029
2030static struct extent_io_ops btree_extent_io_ops = {
2031 .write_cache_pages_lock_hook = btree_lock_page_hook,
2032 .writepage_io_hook = btree_writepage_io_hook,
2033 .readpage_end_io_hook = btree_readpage_end_io_hook,
2034 .submit_bio_hook = btree_submit_bio_hook,
2035 /* note we're sharing with inode.c for the merge bio hook */
2036 .merge_bio_hook = btrfs_merge_bio_hook,
2037};