Btrfs: do not async metadata csumming in certain situations
[linux-2.6-block.git] / fs / btrfs / disk-io.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
34 #include "compat.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47 #include "rcu-string.h"
48
49 #ifdef CONFIG_X86
50 #include <asm/cpufeature.h>
51 #endif
52
53 static struct extent_io_ops btree_extent_io_ops;
54 static void end_workqueue_fn(struct btrfs_work *work);
55 static void free_fs_root(struct btrfs_root *root);
56 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
57                                     int read_only);
58 static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
59 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
60 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
61                                       struct btrfs_root *root);
62 static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
63 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
64 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
65                                         struct extent_io_tree *dirty_pages,
66                                         int mark);
67 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
68                                        struct extent_io_tree *pinned_extents);
69
70 /*
71  * end_io_wq structs are used to do processing in task context when an IO is
72  * complete.  This is used during reads to verify checksums, and it is used
73  * by writes to insert metadata for new file extents after IO is complete.
74  */
75 struct end_io_wq {
76         struct bio *bio;
77         bio_end_io_t *end_io;
78         void *private;
79         struct btrfs_fs_info *info;
80         int error;
81         int metadata;
82         struct list_head list;
83         struct btrfs_work work;
84 };
85
86 /*
87  * async submit bios are used to offload expensive checksumming
88  * onto the worker threads.  They checksum file and metadata bios
89  * just before they are sent down the IO stack.
90  */
91 struct async_submit_bio {
92         struct inode *inode;
93         struct bio *bio;
94         struct list_head list;
95         extent_submit_bio_hook_t *submit_bio_start;
96         extent_submit_bio_hook_t *submit_bio_done;
97         int rw;
98         int mirror_num;
99         unsigned long bio_flags;
100         /*
101          * bio_offset is optional, can be used if the pages in the bio
102          * can't tell us where in the file the bio should go
103          */
104         u64 bio_offset;
105         struct btrfs_work work;
106         int error;
107 };
108
109 /*
110  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
111  * eb, the lockdep key is determined by the btrfs_root it belongs to and
112  * the level the eb occupies in the tree.
113  *
114  * Different roots are used for different purposes and may nest inside each
115  * other, so they require separate keysets.  As lockdep keys should be
116  * static, assign keysets according to the purpose of the root as indicated
117  * by btrfs_root->objectid.  This ensures that all special purpose roots
118  * have separate keysets.
119  *
120  * Lock-nesting across peer nodes is always done with the immediate parent
121  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
122  * subclass to avoid triggering lockdep warning in such cases.
123  *
124  * The key is set by the readpage_end_io_hook after the buffer has passed
125  * csum validation but before the pages are unlocked.  It is also set by
126  * btrfs_init_new_buffer on freshly allocated blocks.
127  *
128  * We also add a check to make sure the highest level of the tree is the
129  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
130  * needs to be updated as well.
131  */
132 #ifdef CONFIG_DEBUG_LOCK_ALLOC
133 # if BTRFS_MAX_LEVEL != 8
134 #  error
135 # endif
136
137 static struct btrfs_lockdep_keyset {
138         u64                     id;             /* root objectid */
139         const char              *name_stem;     /* lock name stem */
140         char                    names[BTRFS_MAX_LEVEL + 1][20];
141         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
142 } btrfs_lockdep_keysets[] = {
143         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
144         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
145         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
146         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
147         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
148         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
149         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
150         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
151         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
152         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
153         { .id = 0,                              .name_stem = "tree"     },
154 };
155
156 void __init btrfs_init_lockdep(void)
157 {
158         int i, j;
159
160         /* initialize lockdep class names */
161         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
162                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
163
164                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
165                         snprintf(ks->names[j], sizeof(ks->names[j]),
166                                  "btrfs-%s-%02d", ks->name_stem, j);
167         }
168 }
169
170 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
171                                     int level)
172 {
173         struct btrfs_lockdep_keyset *ks;
174
175         BUG_ON(level >= ARRAY_SIZE(ks->keys));
176
177         /* find the matching keyset, id 0 is the default entry */
178         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
179                 if (ks->id == objectid)
180                         break;
181
182         lockdep_set_class_and_name(&eb->lock,
183                                    &ks->keys[level], ks->names[level]);
184 }
185
186 #endif
187
188 /*
189  * extents on the btree inode are pretty simple, there's one extent
190  * that covers the entire device
191  */
192 static struct extent_map *btree_get_extent(struct inode *inode,
193                 struct page *page, size_t pg_offset, u64 start, u64 len,
194                 int create)
195 {
196         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
197         struct extent_map *em;
198         int ret;
199
200         read_lock(&em_tree->lock);
201         em = lookup_extent_mapping(em_tree, start, len);
202         if (em) {
203                 em->bdev =
204                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
205                 read_unlock(&em_tree->lock);
206                 goto out;
207         }
208         read_unlock(&em_tree->lock);
209
210         em = alloc_extent_map();
211         if (!em) {
212                 em = ERR_PTR(-ENOMEM);
213                 goto out;
214         }
215         em->start = 0;
216         em->len = (u64)-1;
217         em->block_len = (u64)-1;
218         em->block_start = 0;
219         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
220
221         write_lock(&em_tree->lock);
222         ret = add_extent_mapping(em_tree, em);
223         if (ret == -EEXIST) {
224                 free_extent_map(em);
225                 em = lookup_extent_mapping(em_tree, start, len);
226                 if (!em)
227                         em = ERR_PTR(-EIO);
228         } else if (ret) {
229                 free_extent_map(em);
230                 em = ERR_PTR(ret);
231         }
232         write_unlock(&em_tree->lock);
233
234 out:
235         return em;
236 }
237
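/*
 * checksum helpers: btrfs_csum_data runs crc32c over a buffer, and
 * btrfs_csum_final inverts the running crc and stores it little-endian
 * into the result buffer, which is the on-disk checksum format.
 */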
238 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
239 {
240         return crc32c(seed, data, len);
241 }
242
243 void btrfs_csum_final(u32 crc, char *result)
244 {
245         put_unaligned_le32(~crc, result);
246 }
247
248 /*
249  * compute the csum for a btree block, and either verify it or write it
250  * into the csum field of the block.
251  */
252 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
253                            int verify)
254 {
255         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
256         char *result = NULL;
257         unsigned long len;
258         unsigned long cur_len;
259         unsigned long offset = BTRFS_CSUM_SIZE;
260         char *kaddr;
261         unsigned long map_start;
262         unsigned long map_len;
263         int err;
264         u32 crc = ~(u32)0;
265         unsigned long inline_result;
266
267         len = buf->len - offset;
268         while (len > 0) {
269                 err = map_private_extent_buffer(buf, offset, 32,
270                                         &kaddr, &map_start, &map_len);
271                 if (err)
272                         return 1;
273                 cur_len = min(len, map_len - (offset - map_start));
274                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
275                                       crc, cur_len);
276                 len -= cur_len;
277                 offset += cur_len;
278         }
279         if (csum_size > sizeof(inline_result)) {
280                 result = kzalloc(csum_size, GFP_NOFS);
281                 if (!result)
282                         return 1;
283         } else {
284                 result = (char *)&inline_result;
285         }
286
287         btrfs_csum_final(crc, result);
288
289         if (verify) {
290                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
291                         u32 val;
292                         u32 found = 0;
293                         memcpy(&found, result, csum_size);
294
295                         read_extent_buffer(buf, &val, 0, csum_size);
296                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
297                                        "failed on %llu wanted %X found %X "
298                                        "level %d\n",
299                                        root->fs_info->sb->s_id,
300                                        (unsigned long long)buf->start, val, found,
301                                        btrfs_header_level(buf));
302                         if (result != (char *)&inline_result)
303                                 kfree(result);
304                         return 1;
305                 }
306         } else {
307                 write_extent_buffer(buf, result, 0, csum_size);
308         }
309         if (result != (char *)&inline_result)
310                 kfree(result);
311         return 0;
312 }
313
314 /*
315  * we can't consider a given block up to date unless the transid of the
316  * block matches the transid in the parent node's pointer.  This is how we
317  * detect blocks that either didn't get written at all or got written
318  * in the wrong place.
319  */
320 static int verify_parent_transid(struct extent_io_tree *io_tree,
321                                  struct extent_buffer *eb, u64 parent_transid,
322                                  int atomic)
323 {
324         struct extent_state *cached_state = NULL;
325         int ret;
326
327         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
328                 return 0;
329
330         if (atomic)
331                 return -EAGAIN;
332
333         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
334                          0, &cached_state);
335         if (extent_buffer_uptodate(eb) &&
336             btrfs_header_generation(eb) == parent_transid) {
337                 ret = 0;
338                 goto out;
339         }
340         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
341                        "found %llu\n",
342                        (unsigned long long)eb->start,
343                        (unsigned long long)parent_transid,
344                        (unsigned long long)btrfs_header_generation(eb));
345         ret = 1;
346         clear_extent_buffer_uptodate(eb);
347 out:
348         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
349                              &cached_state, GFP_NOFS);
350         return ret;
351 }
352
353 /*
354  * helper to read a given tree block, doing retries as required when
355  * the checksums don't match and we have alternate mirrors to try.
356  */
357 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
358                                           struct extent_buffer *eb,
359                                           u64 start, u64 parent_transid)
360 {
361         struct extent_io_tree *io_tree;
362         int failed = 0;
363         int ret;
364         int num_copies = 0;
365         int mirror_num = 0;
366         int failed_mirror = 0;
367
368         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
369         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
370         while (1) {
371                 ret = read_extent_buffer_pages(io_tree, eb, start,
372                                                WAIT_COMPLETE,
373                                                btree_get_extent, mirror_num);
374                 if (!ret) {
375                         if (!verify_parent_transid(io_tree, eb,
376                                                    parent_transid, 0))
377                                 break;
378                         else
379                                 ret = -EIO;
380                 }
381
382                 /*
383                  * This buffer's crc is fine, but its contents are corrupted, so
384                  * there is no reason to read the other copies, they won't be
385                  * any less wrong.
386                  */
387                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
388                         break;
389
390                 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
391                                               eb->start, eb->len);
392                 if (num_copies == 1)
393                         break;
394
395                 if (!failed_mirror) {
396                         failed = 1;
397                         failed_mirror = eb->read_mirror;
398                 }
399
400                 mirror_num++;
401                 if (mirror_num == failed_mirror)
402                         mirror_num++;
403
404                 if (mirror_num > num_copies)
405                         break;
406         }
407
408         if (failed && !ret && failed_mirror)
409                 repair_eb_io_failure(root, eb, failed_mirror);
410
411         return ret;
412 }
413
414 /*
415  * checksum a dirty tree block before IO.  This has extra checks to make sure
416  * we only fill in the checksum field in the first page of a multi-page block
417  */
418
419 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
420 {
421         struct extent_io_tree *tree;
422         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
423         u64 found_start;
424         struct extent_buffer *eb;
425
426         tree = &BTRFS_I(page->mapping->host)->io_tree;
427
428         eb = (struct extent_buffer *)page->private;
429         if (page != eb->pages[0])
430                 return 0;
431         found_start = btrfs_header_bytenr(eb);
432         if (found_start != start) {
433                 WARN_ON(1);
434                 return 0;
435         }
436         if (eb->pages[0] != page) {
437                 WARN_ON(1);
438                 return 0;
439         }
440         if (!PageUptodate(page)) {
441                 WARN_ON(1);
442                 return 0;
443         }
444         csum_tree_block(root, eb, 0);
445         return 0;
446 }
447
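/*
 * make sure the fsid in this tree block's header matches the fsid of one
 * of the fs_devices we have open, walking the seed device chain as well.
 * Returns 0 on a match and 1 otherwise.
 */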
448 static int check_tree_block_fsid(struct btrfs_root *root,
449                                  struct extent_buffer *eb)
450 {
451         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
452         u8 fsid[BTRFS_UUID_SIZE];
453         int ret = 1;
454
455         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
456                            BTRFS_FSID_SIZE);
457         while (fs_devices) {
458                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
459                         ret = 0;
460                         break;
461                 }
462                 fs_devices = fs_devices->seed;
463         }
464         return ret;
465 }
466
467 #define CORRUPT(reason, eb, root, slot)                         \
468         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
469                "root=%llu, slot=%d\n", reason,                  \
470                (unsigned long long)btrfs_header_bytenr(eb),     \
471                (unsigned long long)root->objectid, slot)
472
473 static noinline int check_leaf(struct btrfs_root *root,
474                                struct extent_buffer *leaf)
475 {
476         struct btrfs_key key;
477         struct btrfs_key leaf_key;
478         u32 nritems = btrfs_header_nritems(leaf);
479         int slot;
480
481         if (nritems == 0)
482                 return 0;
483
484         /* Check the 0 item */
485         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
486             BTRFS_LEAF_DATA_SIZE(root)) {
487                 CORRUPT("invalid item offset size pair", leaf, root, 0);
488                 return -EIO;
489         }
490
491         /*
492          * Check to make sure each item's keys are in the correct order and their
493          * offsets make sense.  We only have to loop through nritems-1 because
494          * we check the current slot against the next slot, which verifies the
495          * next slot's offset+size makes sense and that the current slot's
496          * offset is correct.
497          */
498         for (slot = 0; slot < nritems - 1; slot++) {
499                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
500                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
501
502                 /* Make sure the keys are in the right order */
503                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
504                         CORRUPT("bad key order", leaf, root, slot);
505                         return -EIO;
506                 }
507
508                 /*
509                  * Make sure the offset and ends are right, remember that the
510                  * item data starts at the end of the leaf and grows towards the
511                  * front.
512                  */
513                 if (btrfs_item_offset_nr(leaf, slot) !=
514                         btrfs_item_end_nr(leaf, slot + 1)) {
515                         CORRUPT("slot offset bad", leaf, root, slot);
516                         return -EIO;
517                 }
518
519                 /*
520                  * Check to make sure that we don't point outside of the leaf,
521          * just in case all the items are consistent with each other, but
522                  * all point outside of the leaf.
523                  */
524                 if (btrfs_item_end_nr(leaf, slot) >
525                     BTRFS_LEAF_DATA_SIZE(root)) {
526                         CORRUPT("slot end outside of leaf", leaf, root, slot);
527                         return -EIO;
528                 }
529         }
530
531         return 0;
532 }
533
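/*
 * walk backwards from the given page, up to max_walk bytes, looking for
 * the extent buffer that contains it.  Returns the eb with a reference
 * held, or NULL if the buffer found does not actually cover the page.
 */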
534 struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
535                                        struct page *page, int max_walk)
536 {
537         struct extent_buffer *eb;
538         u64 start = page_offset(page);
539         u64 target = start;
540         u64 min_start;
541
542         if (start < max_walk)
543                 min_start = 0;
544         else
545                 min_start = start - max_walk;
546
547         while (start >= min_start) {
548                 eb = find_extent_buffer(tree, start, 0);
549                 if (eb) {
550                         /*
551                          * we found an extent buffer and it contains our page
552                          * hooray!
553                          */
554                         if (eb->start <= target &&
555                             eb->start + eb->len > target)
556                                 return eb;
557
558                         /* we found an extent buffer that wasn't for us */
559                         free_extent_buffer(eb);
560                         return NULL;
561                 }
562                 if (start == 0)
563                         break;
564                 start -= PAGE_CACHE_SIZE;
565         }
566         return NULL;
567 }
568
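/*
 * read completion hook for btree pages: once the last page of the extent
 * buffer is in, verify the header bytenr, fsid and checksum (plus basic
 * sanity checks on leaves) before marking the buffer uptodate.
 */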
569 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
570                                struct extent_state *state, int mirror)
571 {
572         struct extent_io_tree *tree;
573         u64 found_start;
574         int found_level;
575         struct extent_buffer *eb;
576         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
577         int ret = 0;
578         int reads_done;
579
580         if (!page->private)
581                 goto out;
582
583         tree = &BTRFS_I(page->mapping->host)->io_tree;
584         eb = (struct extent_buffer *)page->private;
585
586         /* the pending IO might have been the only thing that kept this buffer
587          * in memory.  Make sure we have a ref for all these other checks
588          */
589         extent_buffer_get(eb);
590
591         reads_done = atomic_dec_and_test(&eb->io_pages);
592         if (!reads_done)
593                 goto err;
594
595         eb->read_mirror = mirror;
596         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
597                 ret = -EIO;
598                 goto err;
599         }
600
601         found_start = btrfs_header_bytenr(eb);
602         if (found_start != eb->start) {
603                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
604                                "%llu %llu\n",
605                                (unsigned long long)found_start,
606                                (unsigned long long)eb->start);
607                 ret = -EIO;
608                 goto err;
609         }
610         if (check_tree_block_fsid(root, eb)) {
611                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
612                                (unsigned long long)eb->start);
613                 ret = -EIO;
614                 goto err;
615         }
616         found_level = btrfs_header_level(eb);
617
618         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
619                                        eb, found_level);
620
621         ret = csum_tree_block(root, eb, 1);
622         if (ret) {
623                 ret = -EIO;
624                 goto err;
625         }
626
627         /*
628          * If this is a leaf block and it is corrupt, set the corrupt bit so
629          * that we don't try to read the other copies of this block, just
630          * return -EIO.
631          */
632         if (found_level == 0 && check_leaf(root, eb)) {
633                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
634                 ret = -EIO;
635         }
636
637         if (!ret)
638                 set_extent_buffer_uptodate(eb);
639 err:
640         if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
641                 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
642                 btree_readahead_hook(root, eb, eb->start, ret);
643         }
644
645         if (ret)
646                 clear_extent_buffer_uptodate(eb);
647         free_extent_buffer(eb);
648 out:
649         return ret;
650 }
651
652 static int btree_io_failed_hook(struct page *page, int failed_mirror)
653 {
654         struct extent_buffer *eb;
655         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
656
657         eb = (struct extent_buffer *)page->private;
658         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
659         eb->read_mirror = failed_mirror;
660         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
661                 btree_readahead_hook(root, eb, eb->start, -EIO);
662         return -EIO;    /* we fixed nothing */
663 }
664
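/*
 * bi_end_io callback installed by btrfs_bio_wq_end_io.  Rather than doing
 * the completion work in interrupt context, hand the bio off to the
 * worker pool that matches its type: data, metadata and free space cache
 * writes each get their own set of helper threads.
 */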
665 static void end_workqueue_bio(struct bio *bio, int err)
666 {
667         struct end_io_wq *end_io_wq = bio->bi_private;
668         struct btrfs_fs_info *fs_info;
669
670         fs_info = end_io_wq->info;
671         end_io_wq->error = err;
672         end_io_wq->work.func = end_workqueue_fn;
673         end_io_wq->work.flags = 0;
674
675         if (bio->bi_rw & REQ_WRITE) {
676                 if (end_io_wq->metadata == 1)
677                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
678                                            &end_io_wq->work);
679                 else if (end_io_wq->metadata == 2)
680                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
681                                            &end_io_wq->work);
682                 else
683                         btrfs_queue_worker(&fs_info->endio_write_workers,
684                                            &end_io_wq->work);
685         } else {
686                 if (end_io_wq->metadata)
687                         btrfs_queue_worker(&fs_info->endio_meta_workers,
688                                            &end_io_wq->work);
689                 else
690                         btrfs_queue_worker(&fs_info->endio_workers,
691                                            &end_io_wq->work);
692         }
693 }
694
695 /*
696  * For the metadata arg you want
697  *
698  * 0 - if data
699  * 1 - if normal metadata
700  * 2 - if writing to the free space cache area
701  */
702 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
703                         int metadata)
704 {
705         struct end_io_wq *end_io_wq;
706         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
707         if (!end_io_wq)
708                 return -ENOMEM;
709
710         end_io_wq->private = bio->bi_private;
711         end_io_wq->end_io = bio->bi_end_io;
712         end_io_wq->info = info;
713         end_io_wq->error = 0;
714         end_io_wq->bio = bio;
715         end_io_wq->metadata = metadata;
716
717         bio->bi_private = end_io_wq;
718         bio->bi_end_io = end_workqueue_bio;
719         return 0;
720 }
721
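/*
 * rough upper bound on the number of async bios we allow in flight,
 * used to throttle and wake callers of the async submission helpers
 */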
722 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
723 {
724         unsigned long limit = min_t(unsigned long,
725                                     info->workers.max_workers,
726                                     info->fs_devices->open_devices);
727         return 256 * limit;
728 }
729
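/*
 * the async submit machinery runs in stages: run_one_async_start does the
 * expensive csumming on a worker thread, run_one_async_done wakes up
 * throttled submitters and sends the bio down (or ends it on error), and
 * run_one_async_free releases the async_submit_bio.
 */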
730 static void run_one_async_start(struct btrfs_work *work)
731 {
732         struct async_submit_bio *async;
733         int ret;
734
735         async = container_of(work, struct  async_submit_bio, work);
736         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
737                                       async->mirror_num, async->bio_flags,
738                                       async->bio_offset);
739         if (ret)
740                 async->error = ret;
741 }
742
743 static void run_one_async_done(struct btrfs_work *work)
744 {
745         struct btrfs_fs_info *fs_info;
746         struct async_submit_bio *async;
747         int limit;
748
749         async = container_of(work, struct  async_submit_bio, work);
750         fs_info = BTRFS_I(async->inode)->root->fs_info;
751
752         limit = btrfs_async_submit_limit(fs_info);
753         limit = limit * 2 / 3;
754
755         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
756             waitqueue_active(&fs_info->async_submit_wait))
757                 wake_up(&fs_info->async_submit_wait);
758
759         /* If an error occurred we just want to clean up the bio and move on */
760         if (async->error) {
761                 bio_endio(async->bio, async->error);
762                 return;
763         }
764
765         async->submit_bio_done(async->inode, async->rw, async->bio,
766                                async->mirror_num, async->bio_flags,
767                                async->bio_offset);
768 }
769
770 static void run_one_async_free(struct btrfs_work *work)
771 {
772         struct async_submit_bio *async;
773
774         async = container_of(work, struct  async_submit_bio, work);
775         kfree(async);
776 }
777
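/*
 * queue a bio for async submission: the start hook (usually a csummer)
 * runs first on a worker thread, then the done hook maps and submits the
 * bio.  Callers are made to wait if a drain of async submits is in
 * progress.
 */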
778 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
779                         int rw, struct bio *bio, int mirror_num,
780                         unsigned long bio_flags,
781                         u64 bio_offset,
782                         extent_submit_bio_hook_t *submit_bio_start,
783                         extent_submit_bio_hook_t *submit_bio_done)
784 {
785         struct async_submit_bio *async;
786
787         async = kmalloc(sizeof(*async), GFP_NOFS);
788         if (!async)
789                 return -ENOMEM;
790
791         async->inode = inode;
792         async->rw = rw;
793         async->bio = bio;
794         async->mirror_num = mirror_num;
795         async->submit_bio_start = submit_bio_start;
796         async->submit_bio_done = submit_bio_done;
797
798         async->work.func = run_one_async_start;
799         async->work.ordered_func = run_one_async_done;
800         async->work.ordered_free = run_one_async_free;
801
802         async->work.flags = 0;
803         async->bio_flags = bio_flags;
804         async->bio_offset = bio_offset;
805
806         async->error = 0;
807
808         atomic_inc(&fs_info->nr_async_submits);
809
810         if (rw & REQ_SYNC)
811                 btrfs_set_work_high_prio(&async->work);
812
813         btrfs_queue_worker(&fs_info->workers, &async->work);
814
815         while (atomic_read(&fs_info->async_submit_draining) &&
816               atomic_read(&fs_info->nr_async_submits)) {
817                 wait_event(fs_info->async_submit_wait,
818                            (atomic_read(&fs_info->nr_async_submits) == 0));
819         }
820
821         return 0;
822 }
823
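/*
 * checksum every metadata page attached to this bio before it is sent
 * down to the device
 */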
824 static int btree_csum_one_bio(struct bio *bio)
825 {
826         struct bio_vec *bvec = bio->bi_io_vec;
827         int bio_index = 0;
828         struct btrfs_root *root;
829         int ret = 0;
830
831         WARN_ON(bio->bi_vcnt <= 0);
832         while (bio_index < bio->bi_vcnt) {
833                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
834                 ret = csum_dirty_buffer(root, bvec->bv_page);
835                 if (ret)
836                         break;
837                 bio_index++;
838                 bvec++;
839         }
840         return ret;
841 }
842
843 static int __btree_submit_bio_start(struct inode *inode, int rw,
844                                     struct bio *bio, int mirror_num,
845                                     unsigned long bio_flags,
846                                     u64 bio_offset)
847 {
848         /*
849          * when we're called for a write, we're already in the async
850          * submission context, so just checksum the bio contents here
851          */
852         return btree_csum_one_bio(bio);
853 }
854
855 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
856                                  int mirror_num, unsigned long bio_flags,
857                                  u64 bio_offset)
858 {
859         /*
860          * when we're called for a write, we're already in the async
861          * submission context.  Just jump into btrfs_map_bio
862          */
863         return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
864 }
865
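/*
 * decide whether a metadata write should be checksummed inline or punted
 * to the async helpers.  Tree log bios are always csummed inline, and when
 * the CPU has hardware crc32c (SSE 4.2) the csum is cheap enough that the
 * worker threads are not worth the overhead.
 */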
866 static int check_async_write(struct inode *inode, unsigned long bio_flags)
867 {
868         if (bio_flags & EXTENT_BIO_TREE_LOG)
869                 return 0;
870 #ifdef CONFIG_X86
871         if (cpu_has_xmm4_2)
872                 return 0;
873 #endif
874         return 1;
875 }
876
877 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
878                                  int mirror_num, unsigned long bio_flags,
879                                  u64 bio_offset)
880 {
881         int async = check_async_write(inode, bio_flags);
882         int ret;
883
884         if (!(rw & REQ_WRITE)) {
885
886                 /*
887                  * called for a read, do the setup so that checksum validation
888                  * can happen in the async kernel threads
889                  */
890                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
891                                           bio, 1);
892                 if (ret)
893                         return ret;
894                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
895                                      mirror_num, 0);
896         } else if (!async) {
897                 ret = btree_csum_one_bio(bio);
898                 if (ret)
899                         return ret;
900                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
901                                      mirror_num, 0);
902         }
903
904         /*
905          * kthread helpers are used to submit writes so that checksumming
906          * can happen in parallel across all CPUs
907          */
908         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
909                                    inode, rw, bio, mirror_num, 0,
910                                    bio_offset,
911                                    __btree_submit_bio_start,
912                                    __btree_submit_bio_done);
913 }
914
915 #ifdef CONFIG_MIGRATION
916 static int btree_migratepage(struct address_space *mapping,
917                         struct page *newpage, struct page *page,
918                         enum migrate_mode mode)
919 {
920         /*
921          * we can't safely write a btree page from here,
922          * we haven't done the locking hook
923          */
924         if (PageDirty(page))
925                 return -EAGAIN;
926         /*
927          * Buffers may be managed in a filesystem specific way.
928          * We must have no buffers or drop them.
929          */
930         if (page_has_private(page) &&
931             !try_to_release_page(page, GFP_KERNEL))
932                 return -EAGAIN;
933         return migrate_page(mapping, newpage, page, mode);
934 }
935 #endif
936
937
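/*
 * writepages for the btree inode: for background (WB_SYNC_NONE) writeback
 * we skip the work entirely unless enough dirty metadata has accumulated
 * to be worth writing, and kupdate-style writeback is skipped outright.
 */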
938 static int btree_writepages(struct address_space *mapping,
939                             struct writeback_control *wbc)
940 {
941         struct extent_io_tree *tree;
942         tree = &BTRFS_I(mapping->host)->io_tree;
943         if (wbc->sync_mode == WB_SYNC_NONE) {
944                 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
945                 u64 num_dirty;
946                 unsigned long thresh = 32 * 1024 * 1024;
947
948                 if (wbc->for_kupdate)
949                         return 0;
950
951                 /* this is a bit racy, but that's ok */
952                 num_dirty = root->fs_info->dirty_metadata_bytes;
953                 if (num_dirty < thresh)
954                         return 0;
955         }
956         return btree_write_cache_pages(mapping, wbc);
957 }
958
959 static int btree_readpage(struct file *file, struct page *page)
960 {
961         struct extent_io_tree *tree;
962         tree = &BTRFS_I(page->mapping->host)->io_tree;
963         return extent_read_full_page(tree, page, btree_get_extent, 0);
964 }
965
966 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
967 {
968         if (PageWriteback(page) || PageDirty(page))
969                 return 0;
970         /*
971          * We need to mask out e.g. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
972          * slab allocation from alloc_extent_state down the callchain where
973          * it'd hit a BUG_ON as those flags are not allowed.
974          */
975         gfp_flags &= ~GFP_SLAB_BUG_MASK;
976
977         return try_release_extent_buffer(page, gfp_flags);
978 }
979
980 static void btree_invalidatepage(struct page *page, unsigned long offset)
981 {
982         struct extent_io_tree *tree;
983         tree = &BTRFS_I(page->mapping->host)->io_tree;
984         extent_invalidatepage(tree, page, offset);
985         btree_releasepage(page, GFP_NOFS);
986         if (PagePrivate(page)) {
987                 printk(KERN_WARNING "btrfs warning page private not zero "
988                        "on page %llu\n", (unsigned long long)page_offset(page));
989                 ClearPagePrivate(page);
990                 set_page_private(page, 0);
991                 page_cache_release(page);
992         }
993 }
994
995 static int btree_set_page_dirty(struct page *page)
996 {
997         struct extent_buffer *eb;
998
999         BUG_ON(!PagePrivate(page));
1000         eb = (struct extent_buffer *)page->private;
1001         BUG_ON(!eb);
1002         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1003         BUG_ON(!atomic_read(&eb->refs));
1004         btrfs_assert_tree_locked(eb);
1005         return __set_page_dirty_nobuffers(page);
1006 }
1007
1008 static const struct address_space_operations btree_aops = {
1009         .readpage       = btree_readpage,
1010         .writepages     = btree_writepages,
1011         .releasepage    = btree_releasepage,
1012         .invalidatepage = btree_invalidatepage,
1013 #ifdef CONFIG_MIGRATION
1014         .migratepage    = btree_migratepage,
1015 #endif
1016         .set_page_dirty = btree_set_page_dirty,
1017 };
1018
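/*
 * kick off a non-blocking read of a tree block so it is already in the
 * page cache by the time the caller actually needs it
 */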
1019 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1020                          u64 parent_transid)
1021 {
1022         struct extent_buffer *buf = NULL;
1023         struct inode *btree_inode = root->fs_info->btree_inode;
1024         int ret = 0;
1025
1026         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1027         if (!buf)
1028                 return 0;
1029         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1030                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1031         free_extent_buffer(buf);
1032         return ret;
1033 }
1034
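/*
 * readahead a tree block and, if it passes the checksum and sanity checks,
 * hand the uptodate extent buffer back to the caller via *eb.  Corrupt
 * blocks return -EIO; anything not uptodate is simply dropped.
 */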
1035 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1036                          int mirror_num, struct extent_buffer **eb)
1037 {
1038         struct extent_buffer *buf = NULL;
1039         struct inode *btree_inode = root->fs_info->btree_inode;
1040         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1041         int ret;
1042
1043         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1044         if (!buf)
1045                 return 0;
1046
1047         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1048
1049         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1050                                        btree_get_extent, mirror_num);
1051         if (ret) {
1052                 free_extent_buffer(buf);
1053                 return ret;
1054         }
1055
1056         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1057                 free_extent_buffer(buf);
1058                 return -EIO;
1059         } else if (extent_buffer_uptodate(buf)) {
1060                 *eb = buf;
1061         } else {
1062                 free_extent_buffer(buf);
1063         }
1064         return 0;
1065 }
1066
1067 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1068                                             u64 bytenr, u32 blocksize)
1069 {
1070         struct inode *btree_inode = root->fs_info->btree_inode;
1071         struct extent_buffer *eb;
1072         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1073                                 bytenr, blocksize);
1074         return eb;
1075 }
1076
1077 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1078                                                  u64 bytenr, u32 blocksize)
1079 {
1080         struct inode *btree_inode = root->fs_info->btree_inode;
1081         struct extent_buffer *eb;
1082
1083         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1084                                  bytenr, blocksize);
1085         return eb;
1086 }
1087
1088
1089 int btrfs_write_tree_block(struct extent_buffer *buf)
1090 {
1091         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1092                                         buf->start + buf->len - 1);
1093 }
1094
1095 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1096 {
1097         return filemap_fdatawait_range(buf->pages[0]->mapping,
1098                                        buf->start, buf->start + buf->len - 1);
1099 }
1100
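/*
 * read a tree block from disk, retrying other mirrors when the checksum
 * or transid doesn't match.  The buffer is returned even if the read
 * failed, so callers must check it with btrfs_buffer_uptodate before use.
 */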
1101 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1102                                       u32 blocksize, u64 parent_transid)
1103 {
1104         struct extent_buffer *buf = NULL;
1105         int ret;
1106
1107         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1108         if (!buf)
1109                 return NULL;
1110
1111         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1112         return buf;
1113
1114 }
1115
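/*
 * if this buffer was dirtied in the currently running transaction, remove
 * its bytes from the dirty metadata accounting and clear its dirty bits
 * so it will not be written out.
 */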
1116 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1117                       struct extent_buffer *buf)
1118 {
1119         if (btrfs_header_generation(buf) ==
1120             root->fs_info->running_transaction->transid) {
1121                 btrfs_assert_tree_locked(buf);
1122
1123                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1124                         spin_lock(&root->fs_info->delalloc_lock);
1125                         if (root->fs_info->dirty_metadata_bytes >= buf->len)
1126                                 root->fs_info->dirty_metadata_bytes -= buf->len;
1127                         else {
1128                                 spin_unlock(&root->fs_info->delalloc_lock);
1129                                 btrfs_panic(root->fs_info, -EOVERFLOW,
1130                                           "Can't clear %lu bytes from "
1131                                           "dirty_metadata_bytes (%llu)",
1132                                           buf->len,
1133                                           root->fs_info->dirty_metadata_bytes);
1134                         }
1135                         spin_unlock(&root->fs_info->delalloc_lock);
1136                 }
1137
1138                 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1139                 btrfs_set_lock_blocking(buf);
1140                 clear_extent_buffer_dirty(buf);
1141         }
1142 }
1143
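/*
 * initialize the in-memory btrfs_root structure with its geometry and all
 * of the locks, lists and counters it needs before it is tied to a tree
 * on disk
 */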
1144 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1145                          u32 stripesize, struct btrfs_root *root,
1146                          struct btrfs_fs_info *fs_info,
1147                          u64 objectid)
1148 {
1149         root->node = NULL;
1150         root->commit_root = NULL;
1151         root->sectorsize = sectorsize;
1152         root->nodesize = nodesize;
1153         root->leafsize = leafsize;
1154         root->stripesize = stripesize;
1155         root->ref_cows = 0;
1156         root->track_dirty = 0;
1157         root->in_radix = 0;
1158         root->orphan_item_inserted = 0;
1159         root->orphan_cleanup_state = 0;
1160
1161         root->objectid = objectid;
1162         root->last_trans = 0;
1163         root->highest_objectid = 0;
1164         root->name = NULL;
1165         root->inode_tree = RB_ROOT;
1166         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1167         root->block_rsv = NULL;
1168         root->orphan_block_rsv = NULL;
1169
1170         INIT_LIST_HEAD(&root->dirty_list);
1171         INIT_LIST_HEAD(&root->root_list);
1172         spin_lock_init(&root->orphan_lock);
1173         spin_lock_init(&root->inode_lock);
1174         spin_lock_init(&root->accounting_lock);
1175         mutex_init(&root->objectid_mutex);
1176         mutex_init(&root->log_mutex);
1177         init_waitqueue_head(&root->log_writer_wait);
1178         init_waitqueue_head(&root->log_commit_wait[0]);
1179         init_waitqueue_head(&root->log_commit_wait[1]);
1180         atomic_set(&root->log_commit[0], 0);
1181         atomic_set(&root->log_commit[1], 0);
1182         atomic_set(&root->log_writers, 0);
1183         atomic_set(&root->log_batch, 0);
1184         atomic_set(&root->orphan_inodes, 0);
1185         root->log_transid = 0;
1186         root->last_log_commit = 0;
1187         extent_io_tree_init(&root->dirty_log_pages,
1188                              fs_info->btree_inode->i_mapping);
1189
1190         memset(&root->root_key, 0, sizeof(root->root_key));
1191         memset(&root->root_item, 0, sizeof(root->root_item));
1192         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1193         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1194         root->defrag_trans_start = fs_info->generation;
1195         init_completion(&root->kobj_unregister);
1196         root->defrag_running = 0;
1197         root->root_key.objectid = objectid;
1198         root->anon_dev = 0;
1199
1200         spin_lock_init(&root->root_times_lock);
1201 }
1202
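/*
 * load a root whose item lives directly in the root tree: find the latest
 * root item for this objectid, then read and verify the root node it
 * points to.
 */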
1203 static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1204                                             struct btrfs_fs_info *fs_info,
1205                                             u64 objectid,
1206                                             struct btrfs_root *root)
1207 {
1208         int ret;
1209         u32 blocksize;
1210         u64 generation;
1211
1212         __setup_root(tree_root->nodesize, tree_root->leafsize,
1213                      tree_root->sectorsize, tree_root->stripesize,
1214                      root, fs_info, objectid);
1215         ret = btrfs_find_last_root(tree_root, objectid,
1216                                    &root->root_item, &root->root_key);
1217         if (ret > 0)
1218                 return -ENOENT;
1219         else if (ret < 0)
1220                 return ret;
1221
1222         generation = btrfs_root_generation(&root->root_item);
1223         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1224         root->commit_root = NULL;
1225         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1226                                      blocksize, generation);
1227         if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1228                 free_extent_buffer(root->node);
1229                 root->node = NULL;
1230                 return -EIO;
1231         }
1232         root->commit_root = btrfs_root_node(root);
1233         return 0;
1234 }
1235
1236 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1237 {
1238         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1239         if (root)
1240                 root->fs_info = fs_info;
1241         return root;
1242 }
1243
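/*
 * create a brand new, empty tree: allocate its first leaf, stamp the
 * header with the fsid and owner, and insert the matching root item into
 * the root tree.
 */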
1244 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1245                                      struct btrfs_fs_info *fs_info,
1246                                      u64 objectid)
1247 {
1248         struct extent_buffer *leaf;
1249         struct btrfs_root *tree_root = fs_info->tree_root;
1250         struct btrfs_root *root;
1251         struct btrfs_key key;
1252         int ret = 0;
1253         u64 bytenr;
1254
1255         root = btrfs_alloc_root(fs_info);
1256         if (!root)
1257                 return ERR_PTR(-ENOMEM);
1258
1259         __setup_root(tree_root->nodesize, tree_root->leafsize,
1260                      tree_root->sectorsize, tree_root->stripesize,
1261                      root, fs_info, objectid);
1262         root->root_key.objectid = objectid;
1263         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1264         root->root_key.offset = 0;
1265
1266         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1267                                       0, objectid, NULL, 0, 0, 0);
1268         if (IS_ERR(leaf)) {
1269                 ret = PTR_ERR(leaf);
1270                 goto fail;
1271         }
1272
1273         bytenr = leaf->start;
1274         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1275         btrfs_set_header_bytenr(leaf, leaf->start);
1276         btrfs_set_header_generation(leaf, trans->transid);
1277         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1278         btrfs_set_header_owner(leaf, objectid);
1279         root->node = leaf;
1280
1281         write_extent_buffer(leaf, fs_info->fsid,
1282                             (unsigned long)btrfs_header_fsid(leaf),
1283                             BTRFS_FSID_SIZE);
1284         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1285                             (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1286                             BTRFS_UUID_SIZE);
1287         btrfs_mark_buffer_dirty(leaf);
1288
1289         root->commit_root = btrfs_root_node(root);
1290         root->track_dirty = 1;
1291
1292
1293         root->root_item.flags = 0;
1294         root->root_item.byte_limit = 0;
1295         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1296         btrfs_set_root_generation(&root->root_item, trans->transid);
1297         btrfs_set_root_level(&root->root_item, 0);
1298         btrfs_set_root_refs(&root->root_item, 1);
1299         btrfs_set_root_used(&root->root_item, leaf->len);
1300         btrfs_set_root_last_snapshot(&root->root_item, 0);
1301         btrfs_set_root_dirid(&root->root_item, 0);
1302         root->root_item.drop_level = 0;
1303
1304         key.objectid = objectid;
1305         key.type = BTRFS_ROOT_ITEM_KEY;
1306         key.offset = 0;
1307         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1308         if (ret)
1309                 goto fail;
1310
1311         btrfs_tree_unlock(leaf);
1312
1313 fail:
1314         if (ret)
1315                 return ERR_PTR(ret);
1316
1317         return root;
1318 }
1319
1320 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1321                                          struct btrfs_fs_info *fs_info)
1322 {
1323         struct btrfs_root *root;
1324         struct btrfs_root *tree_root = fs_info->tree_root;
1325         struct extent_buffer *leaf;
1326
1327         root = btrfs_alloc_root(fs_info);
1328         if (!root)
1329                 return ERR_PTR(-ENOMEM);
1330
1331         __setup_root(tree_root->nodesize, tree_root->leafsize,
1332                      tree_root->sectorsize, tree_root->stripesize,
1333                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1334
1335         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1336         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1337         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1338         /*
1339          * log trees do not get reference counted because they go away
1340          * before a real commit is actually done.  They do store pointers
1341          * to file data extents, and those reference counts still get
1342          * updated (along with back refs to the log tree).
1343          */
1344         root->ref_cows = 0;
1345
1346         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1347                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1348                                       0, 0, 0);
1349         if (IS_ERR(leaf)) {
1350                 kfree(root);
1351                 return ERR_CAST(leaf);
1352         }
1353
1354         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1355         btrfs_set_header_bytenr(leaf, leaf->start);
1356         btrfs_set_header_generation(leaf, trans->transid);
1357         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1358         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1359         root->node = leaf;
1360
1361         write_extent_buffer(root->node, root->fs_info->fsid,
1362                             (unsigned long)btrfs_header_fsid(root->node),
1363                             BTRFS_FSID_SIZE);
1364         btrfs_mark_buffer_dirty(root->node);
1365         btrfs_tree_unlock(root->node);
1366         return root;
1367 }
1368
1369 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1370                              struct btrfs_fs_info *fs_info)
1371 {
1372         struct btrfs_root *log_root;
1373
1374         log_root = alloc_log_tree(trans, fs_info);
1375         if (IS_ERR(log_root))
1376                 return PTR_ERR(log_root);
1377         WARN_ON(fs_info->log_root_tree);
1378         fs_info->log_root_tree = log_root;
1379         return 0;
1380 }
1381
1382 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1383                        struct btrfs_root *root)
1384 {
1385         struct btrfs_root *log_root;
1386         struct btrfs_inode_item *inode_item;
1387
1388         log_root = alloc_log_tree(trans, root->fs_info);
1389         if (IS_ERR(log_root))
1390                 return PTR_ERR(log_root);
1391
1392         log_root->last_trans = trans->transid;
1393         log_root->root_key.offset = root->root_key.objectid;
1394
1395         inode_item = &log_root->root_item.inode;
1396         inode_item->generation = cpu_to_le64(1);
1397         inode_item->size = cpu_to_le64(3);
1398         inode_item->nlink = cpu_to_le32(1);
1399         inode_item->nbytes = cpu_to_le64(root->leafsize);
1400         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1401
1402         btrfs_set_root_node(&log_root->root_item, log_root->node);
1403
1404         WARN_ON(root->log_root);
1405         root->log_root = log_root;
1406         root->log_transid = 0;
1407         root->last_log_commit = 0;
1408         return 0;
1409 }
1410
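/*
 * read a subvolume root from disk without consulting or updating the
 * fs_roots radix tree cache.  An offset of (u64)-1 means "use the latest
 * root item", which takes the find_and_setup_root path; otherwise the
 * exact key is looked up in the root tree.
 */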
1411 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1412                                                struct btrfs_key *location)
1413 {
1414         struct btrfs_root *root;
1415         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1416         struct btrfs_path *path;
1417         struct extent_buffer *l;
1418         u64 generation;
1419         u32 blocksize;
1420         int ret = 0;
1421         int slot;
1422
1423         root = btrfs_alloc_root(fs_info);
1424         if (!root)
1425                 return ERR_PTR(-ENOMEM);
1426         if (location->offset == (u64)-1) {
1427                 ret = find_and_setup_root(tree_root, fs_info,
1428                                           location->objectid, root);
1429                 if (ret) {
1430                         kfree(root);
1431                         return ERR_PTR(ret);
1432                 }
1433                 goto out;
1434         }
1435
1436         __setup_root(tree_root->nodesize, tree_root->leafsize,
1437                      tree_root->sectorsize, tree_root->stripesize,
1438                      root, fs_info, location->objectid);
1439
1440         path = btrfs_alloc_path();
1441         if (!path) {
1442                 kfree(root);
1443                 return ERR_PTR(-ENOMEM);
1444         }
1445         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1446         if (ret == 0) {
1447                 l = path->nodes[0];
1448                 slot = path->slots[0];
1449                 btrfs_read_root_item(tree_root, l, slot, &root->root_item);
1450                 memcpy(&root->root_key, location, sizeof(*location));
1451         }
1452         btrfs_free_path(path);
1453         if (ret) {
1454                 kfree(root);
1455                 if (ret > 0)
1456                         ret = -ENOENT;
1457                 return ERR_PTR(ret);
1458         }
1459
1460         generation = btrfs_root_generation(&root->root_item);
1461         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1462         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1463                                      blocksize, generation);
1464         root->commit_root = btrfs_root_node(root);
1465         BUG_ON(!root->node); /* -ENOMEM */
1466 out:
1467         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1468                 root->ref_cows = 1;
1469                 btrfs_check_and_init_root_item(&root->root_item);
1470         }
1471
1472         return root;
1473 }
1474
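/*
 * look up a root by key: the well known roots are returned directly,
 * everything else is first checked in the fs_roots radix cache and
 * only read from disk (and inserted into the cache) on a miss
 */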
1475 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1476                                               struct btrfs_key *location)
1477 {
1478         struct btrfs_root *root;
1479         int ret;
1480
1481         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1482                 return fs_info->tree_root;
1483         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1484                 return fs_info->extent_root;
1485         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1486                 return fs_info->chunk_root;
1487         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1488                 return fs_info->dev_root;
1489         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1490                 return fs_info->csum_root;
1491         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1492                 return fs_info->quota_root ? fs_info->quota_root :
1493                                              ERR_PTR(-ENOENT);
1494 again:
1495         spin_lock(&fs_info->fs_roots_radix_lock);
1496         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1497                                  (unsigned long)location->objectid);
1498         spin_unlock(&fs_info->fs_roots_radix_lock);
1499         if (root)
1500                 return root;
1501
1502         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1503         if (IS_ERR(root))
1504                 return root;
1505
1506         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1507         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1508                                         GFP_NOFS);
1509         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1510                 ret = -ENOMEM;
1511                 goto fail;
1512         }
1513
1514         btrfs_init_free_ino_ctl(root);
1515         mutex_init(&root->fs_commit_mutex);
1516         spin_lock_init(&root->cache_lock);
1517         init_waitqueue_head(&root->cache_wait);
1518
1519         ret = get_anon_bdev(&root->anon_dev);
1520         if (ret)
1521                 goto fail;
1522
1523         if (btrfs_root_refs(&root->root_item) == 0) {
1524                 ret = -ENOENT;
1525                 goto fail;
1526         }
1527
1528         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1529         if (ret < 0)
1530                 goto fail;
1531         if (ret == 0)
1532                 root->orphan_item_inserted = 1;
1533
1534         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1535         if (ret)
1536                 goto fail;
1537
1538         spin_lock(&fs_info->fs_roots_radix_lock);
1539         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1540                                 (unsigned long)root->root_key.objectid,
1541                                 root);
1542         if (ret == 0)
1543                 root->in_radix = 1;
1544
1545         spin_unlock(&fs_info->fs_roots_radix_lock);
1546         radix_tree_preload_end();
1547         if (ret) {
1548                 if (ret == -EEXIST) {
1549                         free_fs_root(root);
1550                         goto again;
1551                 }
1552                 goto fail;
1553         }
1554
1555         ret = btrfs_find_dead_roots(fs_info->tree_root,
1556                                     root->root_key.objectid);
1557         WARN_ON(ret);
1558         return root;
1559 fail:
1560         free_fs_root(root);
1561         return ERR_PTR(ret);
1562 }
1563
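/*
 * report congestion to the VM if any of the block devices backing
 * this filesystem is congested
 */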
1564 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1565 {
1566         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1567         int ret = 0;
1568         struct btrfs_device *device;
1569         struct backing_dev_info *bdi;
1570
1571         rcu_read_lock();
1572         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1573                 if (!device->bdev)
1574                         continue;
1575                 bdi = blk_get_backing_dev_info(device->bdev);
1576                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1577                         ret = 1;
1578                         break;
1579                 }
1580         }
1581         rcu_read_unlock();
1582         return ret;
1583 }
1584
1585 /*
1586  * If this fails, caller must call bdi_destroy() to get rid of the
1587  * bdi again.
1588  */
1589 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1590 {
1591         int err;
1592
1593         bdi->capabilities = BDI_CAP_MAP_COPY;
1594         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1595         if (err)
1596                 return err;
1597
1598         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1599         bdi->congested_fn       = btrfs_congested_fn;
1600         bdi->congested_data     = info;
1601         return 0;
1602 }
1603
1604 /*
1605  * called by the kthread helper functions to finally call the bio end_io
1606  * functions.  This is where read checksum verification actually happens
1607  */
1608 static void end_workqueue_fn(struct btrfs_work *work)
1609 {
1610         struct bio *bio;
1611         struct end_io_wq *end_io_wq;
1612         struct btrfs_fs_info *fs_info;
1613         int error;
1614
1615         end_io_wq = container_of(work, struct end_io_wq, work);
1616         bio = end_io_wq->bio;
1617         fs_info = end_io_wq->info;
1618
1619         error = end_io_wq->error;
1620         bio->bi_private = end_io_wq->private;
1621         bio->bi_end_io = end_io_wq->end_io;
1622         kfree(end_io_wq);
1623         bio_endio(bio, error);
1624 }
1625
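/*
 * background kthread that runs delayed iputs, cleans up old snapshots
 * and defrags inodes whenever the filesystem is writable
 */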
1626 static int cleaner_kthread(void *arg)
1627 {
1628         struct btrfs_root *root = arg;
1629
1630         do {
1631                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1632                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1633                         btrfs_run_delayed_iputs(root);
1634                         btrfs_clean_old_snapshots(root);
1635                         mutex_unlock(&root->fs_info->cleaner_mutex);
1636                         btrfs_run_defrag_inodes(root->fs_info);
1637                 }
1638
1639                 if (!try_to_freeze()) {
1640                         set_current_state(TASK_INTERRUPTIBLE);
1641                         if (!kthread_should_stop())
1642                                 schedule();
1643                         __set_current_state(TASK_RUNNING);
1644                 }
1645         } while (!kthread_should_stop());
1646         return 0;
1647 }
1648
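/*
 * background kthread that commits the running transaction once it is
 * roughly 30 seconds old, or right away if the transaction is blocked
 * waiting for a commit
 */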
1649 static int transaction_kthread(void *arg)
1650 {
1651         struct btrfs_root *root = arg;
1652         struct btrfs_trans_handle *trans;
1653         struct btrfs_transaction *cur;
1654         u64 transid;
1655         unsigned long now;
1656         unsigned long delay;
1657         bool cannot_commit;
1658
1659         do {
1660                 cannot_commit = false;
1661                 delay = HZ * 30;
1662                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1663
1664                 spin_lock(&root->fs_info->trans_lock);
1665                 cur = root->fs_info->running_transaction;
1666                 if (!cur) {
1667                         spin_unlock(&root->fs_info->trans_lock);
1668                         goto sleep;
1669                 }
1670
1671                 now = get_seconds();
1672                 if (!cur->blocked &&
1673                     (now < cur->start_time || now - cur->start_time < 30)) {
1674                         spin_unlock(&root->fs_info->trans_lock);
1675                         delay = HZ * 5;
1676                         goto sleep;
1677                 }
1678                 transid = cur->transid;
1679                 spin_unlock(&root->fs_info->trans_lock);
1680
1681                 /* If the file system is aborted, this will always fail. */
1682                 trans = btrfs_attach_transaction(root);
1683                 if (IS_ERR(trans)) {
1684                         if (PTR_ERR(trans) != -ENOENT)
1685                                 cannot_commit = true;
1686                         goto sleep;
1687                 }
1688                 if (transid == trans->transid) {
1689                         btrfs_commit_transaction(trans, root);
1690                 } else {
1691                         btrfs_end_transaction(trans, root);
1692                 }
1693 sleep:
1694                 wake_up_process(root->fs_info->cleaner_kthread);
1695                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1696
1697                 if (!try_to_freeze()) {
1698                         set_current_state(TASK_INTERRUPTIBLE);
1699                         if (!kthread_should_stop() &&
1700                             (!btrfs_transaction_blocked(root->fs_info) ||
1701                              cannot_commit))
1702                                 schedule_timeout(delay);
1703                         __set_current_state(TASK_RUNNING);
1704                 }
1705         } while (!kthread_should_stop());
1706         return 0;
1707 }
1708
1709 /*
1710  * this will find the highest generation in the array of
1711  * root backups.  The index of the newest entry is returned,
1712  * or -1 if we can't find anything.
1713  *
1714  * We check to make sure the array is valid by comparing the
1715  * generation of the latest root in the array with the generation
1716  * in the super block.  If they don't match we pitch it.
1717  */
1718 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1719 {
1720         u64 cur;
1721         int newest_index = -1;
1722         struct btrfs_root_backup *root_backup;
1723         int i;
1724
1725         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1726                 root_backup = info->super_copy->super_roots + i;
1727                 cur = btrfs_backup_tree_root_gen(root_backup);
1728                 if (cur == newest_gen)
1729                         newest_index = i;
1730         }
1731
1732         /* check to see if we actually wrapped around */
1733         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1734                 root_backup = info->super_copy->super_roots;
1735                 cur = btrfs_backup_tree_root_gen(root_backup);
1736                 if (cur == newest_gen)
1737                         newest_index = 0;
1738         }
1739         return newest_index;
1740 }
1741
1742
1743 /*
1744  * find the oldest backup so we know where to store new entries
1745  * in the backup array.  This will set the backup_root_index
1746  * field in the fs_info struct
1747  */
1748 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1749                                      u64 newest_gen)
1750 {
1751         int newest_index = -1;
1752
1753         newest_index = find_newest_super_backup(info, newest_gen);
1754         /* if there was garbage in there, just move along */
1755         if (newest_index == -1) {
1756                 info->backup_root_index = 0;
1757         } else {
1758                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1759         }
1760 }
1761
1762 /*
1763  * copy all the root pointers into the super backup array.
1764  * this will bump the backup pointer by one when it is
1765  * done
1766  */
1767 static void backup_super_roots(struct btrfs_fs_info *info)
1768 {
1769         int next_backup;
1770         struct btrfs_root_backup *root_backup;
1771         int last_backup;
1772
1773         next_backup = info->backup_root_index;
1774         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1775                 BTRFS_NUM_BACKUP_ROOTS;
1776
1777         /*
1778          * just overwrite the last backup if we're at the same generation;
1779          * this happens only at umount
1780          */
1781         root_backup = info->super_for_commit->super_roots + last_backup;
1782         if (btrfs_backup_tree_root_gen(root_backup) ==
1783             btrfs_header_generation(info->tree_root->node))
1784                 next_backup = last_backup;
1785
1786         root_backup = info->super_for_commit->super_roots + next_backup;
1787
1788         /*
1789          * make sure all of our padding and empty slots get zero filled
1790          * regardless of which ones we use today
1791          */
1792         memset(root_backup, 0, sizeof(*root_backup));
1793
1794         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1795
1796         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1797         btrfs_set_backup_tree_root_gen(root_backup,
1798                                btrfs_header_generation(info->tree_root->node));
1799
1800         btrfs_set_backup_tree_root_level(root_backup,
1801                                btrfs_header_level(info->tree_root->node));
1802
1803         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1804         btrfs_set_backup_chunk_root_gen(root_backup,
1805                                btrfs_header_generation(info->chunk_root->node));
1806         btrfs_set_backup_chunk_root_level(root_backup,
1807                                btrfs_header_level(info->chunk_root->node));
1808
1809         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1810         btrfs_set_backup_extent_root_gen(root_backup,
1811                                btrfs_header_generation(info->extent_root->node));
1812         btrfs_set_backup_extent_root_level(root_backup,
1813                                btrfs_header_level(info->extent_root->node));
1814
1815         /*
1816          * we might commit during log recovery, which happens before we set
1817          * the fs_root.  Make sure it is valid before we fill it in.
1818          */
1819         if (info->fs_root && info->fs_root->node) {
1820                 btrfs_set_backup_fs_root(root_backup,
1821                                          info->fs_root->node->start);
1822                 btrfs_set_backup_fs_root_gen(root_backup,
1823                                btrfs_header_generation(info->fs_root->node));
1824                 btrfs_set_backup_fs_root_level(root_backup,
1825                                btrfs_header_level(info->fs_root->node));
1826         }
1827
1828         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1829         btrfs_set_backup_dev_root_gen(root_backup,
1830                                btrfs_header_generation(info->dev_root->node));
1831         btrfs_set_backup_dev_root_level(root_backup,
1832                                        btrfs_header_level(info->dev_root->node));
1833
1834         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1835         btrfs_set_backup_csum_root_gen(root_backup,
1836                                btrfs_header_generation(info->csum_root->node));
1837         btrfs_set_backup_csum_root_level(root_backup,
1838                                btrfs_header_level(info->csum_root->node));
1839
1840         btrfs_set_backup_total_bytes(root_backup,
1841                              btrfs_super_total_bytes(info->super_copy));
1842         btrfs_set_backup_bytes_used(root_backup,
1843                              btrfs_super_bytes_used(info->super_copy));
1844         btrfs_set_backup_num_devices(root_backup,
1845                              btrfs_super_num_devices(info->super_copy));
1846
1847         /*
1848          * if we don't copy this out to the super_copy, it won't get remembered
1849          * for the next commit
1850          */
1851         memcpy(&info->super_copy->super_roots,
1852                &info->super_for_commit->super_roots,
1853                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1854 }
1855
1856 /*
1857  * this copies info out of the root backup array and back into
1858  * the in-memory super block.  It is meant to help iterate through
1859  * the array, so you send it the number of backups you've already
1860  * tried and the last backup index you used.
1861  *
1862  * this returns -1 when it has tried all the backups
1863  */
1864 static noinline int next_root_backup(struct btrfs_fs_info *info,
1865                                      struct btrfs_super_block *super,
1866                                      int *num_backups_tried, int *backup_index)
1867 {
1868         struct btrfs_root_backup *root_backup;
1869         int newest = *backup_index;
1870
1871         if (*num_backups_tried == 0) {
1872                 u64 gen = btrfs_super_generation(super);
1873
1874                 newest = find_newest_super_backup(info, gen);
1875                 if (newest == -1)
1876                         return -1;
1877
1878                 *backup_index = newest;
1879                 *num_backups_tried = 1;
1880         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1881                 /* we've tried all the backups, all done */
1882                 return -1;
1883         } else {
1884                 /* jump to the next oldest backup */
1885                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1886                         BTRFS_NUM_BACKUP_ROOTS;
1887                 *backup_index = newest;
1888                 *num_backups_tried += 1;
1889         }
1890         root_backup = super->super_roots + newest;
1891
1892         btrfs_set_super_generation(super,
1893                                    btrfs_backup_tree_root_gen(root_backup));
1894         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1895         btrfs_set_super_root_level(super,
1896                                    btrfs_backup_tree_root_level(root_backup));
1897         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1898
1899         /*
1900          * fixme: the total bytes and num_devices need to match or we should
1901          * require a fsck
1902          */
1903         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1904         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1905         return 0;
1906 }
1907
1908 /* helper to cleanup tree roots */
1909 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1910 {
1911         free_extent_buffer(info->tree_root->node);
1912         free_extent_buffer(info->tree_root->commit_root);
1913         free_extent_buffer(info->dev_root->node);
1914         free_extent_buffer(info->dev_root->commit_root);
1915         free_extent_buffer(info->extent_root->node);
1916         free_extent_buffer(info->extent_root->commit_root);
1917         free_extent_buffer(info->csum_root->node);
1918         free_extent_buffer(info->csum_root->commit_root);
1919         if (info->quota_root) {
1920                 free_extent_buffer(info->quota_root->node);
1921                 free_extent_buffer(info->quota_root->commit_root);
1922         }
1923
1924         info->tree_root->node = NULL;
1925         info->tree_root->commit_root = NULL;
1926         info->dev_root->node = NULL;
1927         info->dev_root->commit_root = NULL;
1928         info->extent_root->node = NULL;
1929         info->extent_root->commit_root = NULL;
1930         info->csum_root->node = NULL;
1931         info->csum_root->commit_root = NULL;
1932         if (info->quota_root) {
1933                 info->quota_root->node = NULL;
1934                 info->quota_root->commit_root = NULL;
1935         }
1936
1937         if (chunk_root) {
1938                 free_extent_buffer(info->chunk_root->node);
1939                 free_extent_buffer(info->chunk_root->commit_root);
1940                 info->chunk_root->node = NULL;
1941                 info->chunk_root->commit_root = NULL;
1942         }
1943 }
1944
1945
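/*
 * mount-time entry point: read the super block, start the worker
 * thread pools and kthreads, read all of the tree roots and replay
 * the log tree if one is present
 */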
1946 int open_ctree(struct super_block *sb,
1947                struct btrfs_fs_devices *fs_devices,
1948                char *options)
1949 {
1950         u32 sectorsize;
1951         u32 nodesize;
1952         u32 leafsize;
1953         u32 blocksize;
1954         u32 stripesize;
1955         u64 generation;
1956         u64 features;
1957         struct btrfs_key location;
1958         struct buffer_head *bh;
1959         struct btrfs_super_block *disk_super;
1960         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1961         struct btrfs_root *tree_root;
1962         struct btrfs_root *extent_root;
1963         struct btrfs_root *csum_root;
1964         struct btrfs_root *chunk_root;
1965         struct btrfs_root *dev_root;
1966         struct btrfs_root *quota_root;
1967         struct btrfs_root *log_tree_root;
1968         int ret;
1969         int err = -EINVAL;
1970         int num_backups_tried = 0;
1971         int backup_index = 0;
1972
1973         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
1974         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
1975         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
1976         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
1977         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
1978         quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
1979
1980         if (!tree_root || !extent_root || !csum_root ||
1981             !chunk_root || !dev_root || !quota_root) {
1982                 err = -ENOMEM;
1983                 goto fail;
1984         }
1985
1986         ret = init_srcu_struct(&fs_info->subvol_srcu);
1987         if (ret) {
1988                 err = ret;
1989                 goto fail;
1990         }
1991
1992         ret = setup_bdi(fs_info, &fs_info->bdi);
1993         if (ret) {
1994                 err = ret;
1995                 goto fail_srcu;
1996         }
1997
1998         fs_info->btree_inode = new_inode(sb);
1999         if (!fs_info->btree_inode) {
2000                 err = -ENOMEM;
2001                 goto fail_bdi;
2002         }
2003
2004         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2005
2006         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2007         INIT_LIST_HEAD(&fs_info->trans_list);
2008         INIT_LIST_HEAD(&fs_info->dead_roots);
2009         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2010         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2011         INIT_LIST_HEAD(&fs_info->ordered_operations);
2012         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2013         spin_lock_init(&fs_info->delalloc_lock);
2014         spin_lock_init(&fs_info->trans_lock);
2015         spin_lock_init(&fs_info->fs_roots_radix_lock);
2016         spin_lock_init(&fs_info->delayed_iput_lock);
2017         spin_lock_init(&fs_info->defrag_inodes_lock);
2018         spin_lock_init(&fs_info->free_chunk_lock);
2019         spin_lock_init(&fs_info->tree_mod_seq_lock);
2020         rwlock_init(&fs_info->tree_mod_log_lock);
2021         mutex_init(&fs_info->reloc_mutex);
2022
2023         init_completion(&fs_info->kobj_unregister);
2024         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2025         INIT_LIST_HEAD(&fs_info->space_info);
2026         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2027         btrfs_mapping_init(&fs_info->mapping_tree);
2028         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2029                              BTRFS_BLOCK_RSV_GLOBAL);
2030         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2031                              BTRFS_BLOCK_RSV_DELALLOC);
2032         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2033         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2034         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2035         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2036                              BTRFS_BLOCK_RSV_DELOPS);
2037         atomic_set(&fs_info->nr_async_submits, 0);
2038         atomic_set(&fs_info->async_delalloc_pages, 0);
2039         atomic_set(&fs_info->async_submit_draining, 0);
2040         atomic_set(&fs_info->nr_async_bios, 0);
2041         atomic_set(&fs_info->defrag_running, 0);
2042         atomic_set(&fs_info->tree_mod_seq, 0);
2043         fs_info->sb = sb;
2044         fs_info->max_inline = 8192 * 1024;
2045         fs_info->metadata_ratio = 0;
2046         fs_info->defrag_inodes = RB_ROOT;
2047         fs_info->trans_no_join = 0;
2048         fs_info->free_chunk_space = 0;
2049         fs_info->tree_mod_log = RB_ROOT;
2050
2051         /* readahead state */
2052         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2053         spin_lock_init(&fs_info->reada_lock);
2054
2055         fs_info->thread_pool_size = min_t(unsigned long,
2056                                           num_online_cpus() + 2, 8);
2057
2058         INIT_LIST_HEAD(&fs_info->ordered_extents);
2059         spin_lock_init(&fs_info->ordered_extent_lock);
2060         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2061                                         GFP_NOFS);
2062         if (!fs_info->delayed_root) {
2063                 err = -ENOMEM;
2064                 goto fail_iput;
2065         }
2066         btrfs_init_delayed_root(fs_info->delayed_root);
2067
2068         mutex_init(&fs_info->scrub_lock);
2069         atomic_set(&fs_info->scrubs_running, 0);
2070         atomic_set(&fs_info->scrub_pause_req, 0);
2071         atomic_set(&fs_info->scrubs_paused, 0);
2072         atomic_set(&fs_info->scrub_cancel_req, 0);
2073         init_waitqueue_head(&fs_info->scrub_pause_wait);
2074         init_rwsem(&fs_info->scrub_super_lock);
2075         fs_info->scrub_workers_refcnt = 0;
2076 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2077         fs_info->check_integrity_print_mask = 0;
2078 #endif
2079
2080         spin_lock_init(&fs_info->balance_lock);
2081         mutex_init(&fs_info->balance_mutex);
2082         atomic_set(&fs_info->balance_running, 0);
2083         atomic_set(&fs_info->balance_pause_req, 0);
2084         atomic_set(&fs_info->balance_cancel_req, 0);
2085         fs_info->balance_ctl = NULL;
2086         init_waitqueue_head(&fs_info->balance_wait_q);
2087
2088         sb->s_blocksize = 4096;
2089         sb->s_blocksize_bits = blksize_bits(4096);
2090         sb->s_bdi = &fs_info->bdi;
2091
2092         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2093         set_nlink(fs_info->btree_inode, 1);
2094         /*
2095          * we set the i_size on the btree inode to the max possible offset.
2096          * the real end of the address space is determined by all of
2097          * the devices in the system
2098          */
2099         fs_info->btree_inode->i_size = OFFSET_MAX;
2100         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2101         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2102
2103         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2104         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2105                              fs_info->btree_inode->i_mapping);
2106         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2107         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2108
2109         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2110
2111         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2112         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2113                sizeof(struct btrfs_key));
2114         set_bit(BTRFS_INODE_DUMMY,
2115                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2116         insert_inode_hash(fs_info->btree_inode);
2117
2118         spin_lock_init(&fs_info->block_group_cache_lock);
2119         fs_info->block_group_cache_tree = RB_ROOT;
2120
2121         extent_io_tree_init(&fs_info->freed_extents[0],
2122                              fs_info->btree_inode->i_mapping);
2123         extent_io_tree_init(&fs_info->freed_extents[1],
2124                              fs_info->btree_inode->i_mapping);
2125         fs_info->pinned_extents = &fs_info->freed_extents[0];
2126         fs_info->do_barriers = 1;
2127
2128
2129         mutex_init(&fs_info->ordered_operations_mutex);
2130         mutex_init(&fs_info->tree_log_mutex);
2131         mutex_init(&fs_info->chunk_mutex);
2132         mutex_init(&fs_info->transaction_kthread_mutex);
2133         mutex_init(&fs_info->cleaner_mutex);
2134         mutex_init(&fs_info->volume_mutex);
2135         init_rwsem(&fs_info->extent_commit_sem);
2136         init_rwsem(&fs_info->cleanup_work_sem);
2137         init_rwsem(&fs_info->subvol_sem);
2138
2139         spin_lock_init(&fs_info->qgroup_lock);
2140         fs_info->qgroup_tree = RB_ROOT;
2141         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2142         fs_info->qgroup_seq = 1;
2143         fs_info->quota_enabled = 0;
2144         fs_info->pending_quota_state = 0;
2145
2146         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2147         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2148
2149         init_waitqueue_head(&fs_info->transaction_throttle);
2150         init_waitqueue_head(&fs_info->transaction_wait);
2151         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2152         init_waitqueue_head(&fs_info->async_submit_wait);
2153
2154         __setup_root(4096, 4096, 4096, 4096, tree_root,
2155                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2156
2157         invalidate_bdev(fs_devices->latest_bdev);
2158         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2159         if (!bh) {
2160                 err = -EINVAL;
2161                 goto fail_alloc;
2162         }
2163
2164         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2165         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2166                sizeof(*fs_info->super_for_commit));
2167         brelse(bh);
2168
2169         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2170
2171         disk_super = fs_info->super_copy;
2172         if (!btrfs_super_root(disk_super))
2173                 goto fail_alloc;
2174
2175         /* check FS state, whether FS is broken. */
2176         fs_info->fs_state |= btrfs_super_flags(disk_super);
2177
2178         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2179         if (ret) {
2180                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2181                 err = ret;
2182                 goto fail_alloc;
2183         }
2184
2185         /*
2186          * run through our array of backup supers and setup
2187          * our ring pointer to the oldest one
2188          */
2189         generation = btrfs_super_generation(disk_super);
2190         find_oldest_super_backup(fs_info, generation);
2191
2192         /*
2193          * In the long term, we'll store the compression type in the super
2194          * block, and it'll be used for per file compression control.
2195          */
2196         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2197
2198         ret = btrfs_parse_options(tree_root, options);
2199         if (ret) {
2200                 err = ret;
2201                 goto fail_alloc;
2202         }
2203
2204         features = btrfs_super_incompat_flags(disk_super) &
2205                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2206         if (features) {
2207                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2208                        "unsupported optional features (%Lx).\n",
2209                        (unsigned long long)features);
2210                 err = -EINVAL;
2211                 goto fail_alloc;
2212         }
2213
2214         if (btrfs_super_leafsize(disk_super) !=
2215             btrfs_super_nodesize(disk_super)) {
2216                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2217                        "blocksizes don't match.  node %d leaf %d\n",
2218                        btrfs_super_nodesize(disk_super),
2219                        btrfs_super_leafsize(disk_super));
2220                 err = -EINVAL;
2221                 goto fail_alloc;
2222         }
2223         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2224                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2225                        "blocksize (%d) was too large\n",
2226                        btrfs_super_leafsize(disk_super));
2227                 err = -EINVAL;
2228                 goto fail_alloc;
2229         }
2230
2231         features = btrfs_super_incompat_flags(disk_super);
2232         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2233         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2234                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2235
2236         /*
2237          * flag our filesystem as having big metadata blocks if
2238          * they are bigger than the page size
2239          */
2240         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2241                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2242                         printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2243                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2244         }
2245
2246         nodesize = btrfs_super_nodesize(disk_super);
2247         leafsize = btrfs_super_leafsize(disk_super);
2248         sectorsize = btrfs_super_sectorsize(disk_super);
2249         stripesize = btrfs_super_stripesize(disk_super);
2250
2251         /*
2252          * mixed block groups end up with duplicate but slightly offset
2253          * extent buffers for the same range.  It leads to corruptions
2254          */
2255         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2256             (sectorsize != leafsize)) {
2257                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2258                                 "are not allowed for mixed block groups on %s\n",
2259                                 sb->s_id);
2260                 goto fail_alloc;
2261         }
2262
2263         btrfs_set_super_incompat_flags(disk_super, features);
2264
2265         features = btrfs_super_compat_ro_flags(disk_super) &
2266                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2267         if (!(sb->s_flags & MS_RDONLY) && features) {
2268                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2269                        "unsupported option features (%Lx).\n",
2270                        (unsigned long long)features);
2271                 err = -EINVAL;
2272                 goto fail_alloc;
2273         }
2274
2275         btrfs_init_workers(&fs_info->generic_worker,
2276                            "genwork", 1, NULL);
2277
2278         btrfs_init_workers(&fs_info->workers, "worker",
2279                            fs_info->thread_pool_size,
2280                            &fs_info->generic_worker);
2281
2282         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2283                            fs_info->thread_pool_size,
2284                            &fs_info->generic_worker);
2285
2286         btrfs_init_workers(&fs_info->submit_workers, "submit",
2287                            min_t(u64, fs_devices->num_devices,
2288                            fs_info->thread_pool_size),
2289                            &fs_info->generic_worker);
2290
2291         btrfs_init_workers(&fs_info->caching_workers, "cache",
2292                            2, &fs_info->generic_worker);
2293
2294         /* a higher idle thresh on the submit workers makes it much more
2295          * likely that bios will be sent down in a sane order to the
2296          * devices
2297          */
2298         fs_info->submit_workers.idle_thresh = 64;
2299
2300         fs_info->workers.idle_thresh = 16;
2301         fs_info->workers.ordered = 1;
2302
2303         fs_info->delalloc_workers.idle_thresh = 2;
2304         fs_info->delalloc_workers.ordered = 1;
2305
2306         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2307                            &fs_info->generic_worker);
2308         btrfs_init_workers(&fs_info->endio_workers, "endio",
2309                            fs_info->thread_pool_size,
2310                            &fs_info->generic_worker);
2311         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2312                            fs_info->thread_pool_size,
2313                            &fs_info->generic_worker);
2314         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2315                            "endio-meta-write", fs_info->thread_pool_size,
2316                            &fs_info->generic_worker);
2317         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2318                            fs_info->thread_pool_size,
2319                            &fs_info->generic_worker);
2320         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2321                            1, &fs_info->generic_worker);
2322         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2323                            fs_info->thread_pool_size,
2324                            &fs_info->generic_worker);
2325         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2326                            fs_info->thread_pool_size,
2327                            &fs_info->generic_worker);
2328
2329         /*
2330          * endios are largely parallel and should have a very
2331          * low idle thresh
2332          */
2333         fs_info->endio_workers.idle_thresh = 4;
2334         fs_info->endio_meta_workers.idle_thresh = 4;
2335
2336         fs_info->endio_write_workers.idle_thresh = 2;
2337         fs_info->endio_meta_write_workers.idle_thresh = 2;
2338         fs_info->readahead_workers.idle_thresh = 2;
2339
2340         /*
2341          * btrfs_start_workers can really only fail because of ENOMEM so just
2342          * return -ENOMEM if any of these fail.
2343          */
2344         ret = btrfs_start_workers(&fs_info->workers);
2345         ret |= btrfs_start_workers(&fs_info->generic_worker);
2346         ret |= btrfs_start_workers(&fs_info->submit_workers);
2347         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2348         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2349         ret |= btrfs_start_workers(&fs_info->endio_workers);
2350         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2351         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2352         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2353         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2354         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2355         ret |= btrfs_start_workers(&fs_info->caching_workers);
2356         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2357         if (ret) {
2358                 err = -ENOMEM;
2359                 goto fail_sb_buffer;
2360         }
2361
2362         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2363         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2364                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2365
2366         tree_root->nodesize = nodesize;
2367         tree_root->leafsize = leafsize;
2368         tree_root->sectorsize = sectorsize;
2369         tree_root->stripesize = stripesize;
2370
2371         sb->s_blocksize = sectorsize;
2372         sb->s_blocksize_bits = blksize_bits(sectorsize);
2373
2374         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
2375                     sizeof(disk_super->magic))) {
2376                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2377                 goto fail_sb_buffer;
2378         }
2379
2380         if (sectorsize != PAGE_SIZE) {
2381                 printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
2382                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2383                 goto fail_sb_buffer;
2384         }
2385
2386         mutex_lock(&fs_info->chunk_mutex);
2387         ret = btrfs_read_sys_array(tree_root);
2388         mutex_unlock(&fs_info->chunk_mutex);
2389         if (ret) {
2390                 printk(KERN_WARNING "btrfs: failed to read the system "
2391                        "array on %s\n", sb->s_id);
2392                 goto fail_sb_buffer;
2393         }
2394
2395         blocksize = btrfs_level_size(tree_root,
2396                                      btrfs_super_chunk_root_level(disk_super));
2397         generation = btrfs_super_chunk_root_generation(disk_super);
2398
2399         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2400                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2401
2402         chunk_root->node = read_tree_block(chunk_root,
2403                                            btrfs_super_chunk_root(disk_super),
2404                                            blocksize, generation);
2405         BUG_ON(!chunk_root->node); /* -ENOMEM */
2406         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2407                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2408                        sb->s_id);
2409                 goto fail_tree_roots;
2410         }
2411         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2412         chunk_root->commit_root = btrfs_root_node(chunk_root);
2413
2414         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2415            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2416            BTRFS_UUID_SIZE);
2417
2418         ret = btrfs_read_chunk_tree(chunk_root);
2419         if (ret) {
2420                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2421                        sb->s_id);
2422                 goto fail_tree_roots;
2423         }
2424
2425         btrfs_close_extra_devices(fs_devices);
2426
2427         if (!fs_devices->latest_bdev) {
2428                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2429                        sb->s_id);
2430                 goto fail_tree_roots;
2431         }
2432
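/*
 * read the tree root; if it is damaged and the RECOVERY mount option
 * is set, fall back to the backup roots one at a time
 */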
2433 retry_root_backup:
2434         blocksize = btrfs_level_size(tree_root,
2435                                      btrfs_super_root_level(disk_super));
2436         generation = btrfs_super_generation(disk_super);
2437
2438         tree_root->node = read_tree_block(tree_root,
2439                                           btrfs_super_root(disk_super),
2440                                           blocksize, generation);
2441         if (!tree_root->node ||
2442             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2443                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2444                        sb->s_id);
2445
2446                 goto recovery_tree_root;
2447         }
2448
2449         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2450         tree_root->commit_root = btrfs_root_node(tree_root);
2451
2452         ret = find_and_setup_root(tree_root, fs_info,
2453                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2454         if (ret)
2455                 goto recovery_tree_root;
2456         extent_root->track_dirty = 1;
2457
2458         ret = find_and_setup_root(tree_root, fs_info,
2459                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2460         if (ret)
2461                 goto recovery_tree_root;
2462         dev_root->track_dirty = 1;
2463
2464         ret = find_and_setup_root(tree_root, fs_info,
2465                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2466         if (ret)
2467                 goto recovery_tree_root;
2468         csum_root->track_dirty = 1;
2469
2470         ret = find_and_setup_root(tree_root, fs_info,
2471                                   BTRFS_QUOTA_TREE_OBJECTID, quota_root);
2472         if (ret) {
2473                 kfree(quota_root);
2474                 quota_root = fs_info->quota_root = NULL;
2475         } else {
2476                 quota_root->track_dirty = 1;
2477                 fs_info->quota_enabled = 1;
2478                 fs_info->pending_quota_state = 1;
2479         }
2480
2481         fs_info->generation = generation;
2482         fs_info->last_trans_committed = generation;
2483
2484         ret = btrfs_recover_balance(fs_info);
2485         if (ret) {
2486                 printk(KERN_WARNING "btrfs: failed to recover balance\n");
2487                 goto fail_block_groups;
2488         }
2489
2490         ret = btrfs_init_dev_stats(fs_info);
2491         if (ret) {
2492                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2493                        ret);
2494                 goto fail_block_groups;
2495         }
2496
2497         ret = btrfs_init_space_info(fs_info);
2498         if (ret) {
2499                 printk(KERN_ERR "Failed to initial space info: %d\n", ret);
2500                 goto fail_block_groups;
2501         }
2502
2503         ret = btrfs_read_block_groups(extent_root);
2504         if (ret) {
2505                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2506                 goto fail_block_groups;
2507         }
2508
2509         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2510                                                "btrfs-cleaner");
2511         if (IS_ERR(fs_info->cleaner_kthread))
2512                 goto fail_block_groups;
2513
2514         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2515                                                    tree_root,
2516                                                    "btrfs-transaction");
2517         if (IS_ERR(fs_info->transaction_kthread))
2518                 goto fail_cleaner;
2519
2520         if (!btrfs_test_opt(tree_root, SSD) &&
2521             !btrfs_test_opt(tree_root, NOSSD) &&
2522             !fs_info->fs_devices->rotating) {
2523                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2524                        "mode\n");
2525                 btrfs_set_opt(fs_info->mount_opt, SSD);
2526         }
2527
2528 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2529         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2530                 ret = btrfsic_mount(tree_root, fs_devices,
2531                                     btrfs_test_opt(tree_root,
2532                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2533                                     1 : 0,
2534                                     fs_info->check_integrity_print_mask);
2535                 if (ret)
2536                         printk(KERN_WARNING "btrfs: failed to initialize"
2537                                " integrity check module %s\n", sb->s_id);
2538         }
2539 #endif
2540         ret = btrfs_read_qgroup_config(fs_info);
2541         if (ret)
2542                 goto fail_trans_kthread;
2543
2544         /* do not make disk changes in a broken FS */
2545         if (btrfs_super_log_root(disk_super) != 0) {
2546                 u64 bytenr = btrfs_super_log_root(disk_super);
2547
2548                 if (fs_devices->rw_devices == 0) {
2549                         printk(KERN_WARNING "Btrfs log replay required "
2550                                "on RO media\n");
2551                         err = -EIO;
2552                         goto fail_qgroup;
2553                 }
2554                 blocksize =
2555                      btrfs_level_size(tree_root,
2556                                       btrfs_super_log_root_level(disk_super));
2557
2558                 log_tree_root = btrfs_alloc_root(fs_info);
2559                 if (!log_tree_root) {
2560                         err = -ENOMEM;
2561                         goto fail_qgroup;
2562                 }
2563
2564                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2565                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2566
2567                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2568                                                       blocksize,
2569                                                       generation + 1);
2570                 /* returns with log_tree_root freed on success */
2571                 ret = btrfs_recover_log_trees(log_tree_root);
2572                 if (ret) {
2573                         btrfs_error(tree_root->fs_info, ret,
2574                                     "Failed to recover log tree");
2575                         free_extent_buffer(log_tree_root->node);
2576                         kfree(log_tree_root);
2577                         goto fail_trans_kthread;
2578                 }
2579
2580                 if (sb->s_flags & MS_RDONLY) {
2581                         ret = btrfs_commit_super(tree_root);
2582                         if (ret)
2583                                 goto fail_trans_kthread;
2584                 }
2585         }
2586
2587         ret = btrfs_find_orphan_roots(tree_root);
2588         if (ret)
2589                 goto fail_trans_kthread;
2590
2591         if (!(sb->s_flags & MS_RDONLY)) {
2592                 ret = btrfs_cleanup_fs_roots(fs_info);
2593                 if (ret)
2594                         goto fail_trans_kthread;
2595
2596                 ret = btrfs_recover_relocation(tree_root);
2597                 if (ret < 0) {
2598                         printk(KERN_WARNING
2599                                "btrfs: failed to recover relocation\n");
2600                         err = -EINVAL;
2601                         goto fail_qgroup;
2602                 }
2603         }
2604
2605         location.objectid = BTRFS_FS_TREE_OBJECTID;
2606         location.type = BTRFS_ROOT_ITEM_KEY;
2607         location.offset = (u64)-1;
2608
2609         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2610         if (!fs_info->fs_root)
2611                 goto fail_qgroup;
2612         if (IS_ERR(fs_info->fs_root)) {
2613                 err = PTR_ERR(fs_info->fs_root);
2614                 goto fail_qgroup;
2615         }
2616
2617         if (sb->s_flags & MS_RDONLY)
2618                 return 0;
2619
2620         down_read(&fs_info->cleanup_work_sem);
2621         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2622             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2623                 up_read(&fs_info->cleanup_work_sem);
2624                 close_ctree(tree_root);
2625                 return ret;
2626         }
2627         up_read(&fs_info->cleanup_work_sem);
2628
2629         ret = btrfs_resume_balance_async(fs_info);
2630         if (ret) {
2631                 printk(KERN_WARNING "btrfs: failed to resume balance\n");
2632                 close_ctree(tree_root);
2633                 return ret;
2634         }
2635
2636         return 0;
2637
2638 fail_qgroup:
2639         btrfs_free_qgroup_config(fs_info);
2640 fail_trans_kthread:
2641         kthread_stop(fs_info->transaction_kthread);
2642 fail_cleaner:
2643         kthread_stop(fs_info->cleaner_kthread);
2644
2645         /*
2646          * make sure we're done with the btree inode before we stop our
2647          * kthreads
2648          */
2649         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2650         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2651
2652 fail_block_groups:
2653         btrfs_free_block_groups(fs_info);
2654
2655 fail_tree_roots:
2656         free_root_pointers(fs_info, 1);
2657
2658 fail_sb_buffer:
2659         btrfs_stop_workers(&fs_info->generic_worker);
2660         btrfs_stop_workers(&fs_info->readahead_workers);
2661         btrfs_stop_workers(&fs_info->fixup_workers);
2662         btrfs_stop_workers(&fs_info->delalloc_workers);
2663         btrfs_stop_workers(&fs_info->workers);
2664         btrfs_stop_workers(&fs_info->endio_workers);
2665         btrfs_stop_workers(&fs_info->endio_meta_workers);
2666         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2667         btrfs_stop_workers(&fs_info->endio_write_workers);
2668         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2669         btrfs_stop_workers(&fs_info->submit_workers);
2670         btrfs_stop_workers(&fs_info->delayed_workers);
2671         btrfs_stop_workers(&fs_info->caching_workers);
2672 fail_alloc:
2673 fail_iput:
2674         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2675
2676         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2677         iput(fs_info->btree_inode);
2678 fail_bdi:
2679         bdi_destroy(&fs_info->bdi);
2680 fail_srcu:
2681         cleanup_srcu_struct(&fs_info->subvol_srcu);
2682 fail:
2683         btrfs_close_devices(fs_info->fs_devices);
2684         return err;
2685
2686 recovery_tree_root:
2687         if (!btrfs_test_opt(tree_root, RECOVERY))
2688                 goto fail_tree_roots;
2689
2690         free_root_pointers(fs_info, 0);
2691
2692         /* don't use the log in recovery mode, it won't be valid */
2693         btrfs_set_super_log_root(disk_super, 0);
2694
2695         /* we can't trust the free space cache either */
2696         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2697
2698         ret = next_root_backup(fs_info, fs_info->super_copy,
2699                                &num_backups_tried, &backup_index);
2700         if (ret == -1)
2701                 goto fail_block_groups;
2702         goto retry_root_backup;
2703 }
2704
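/*
 * completion handler for the super block buffer heads: on error the
 * buffer is marked not uptodate and the device write-error stat is bumped
 */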
2705 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2706 {
2707         if (uptodate) {
2708                 set_buffer_uptodate(bh);
2709         } else {
2710                 struct btrfs_device *device = (struct btrfs_device *)
2711                         bh->b_private;
2712
2713                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2714                                           "I/O error on %s\n",
2715                                           rcu_str_deref(device->name));
2716                 /* note, we don't set_buffer_write_io_error because we have
2717                  * our own ways of dealing with the IO errors
2718                  */
2719                 clear_buffer_uptodate(bh);
2720                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2721         }
2722         unlock_buffer(bh);
2723         put_bh(bh);
2724 }
2725
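/*
 * read the super block copies from one device and return the
 * buffer_head of the copy with the highest generation
 */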
2726 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2727 {
2728         struct buffer_head *bh;
2729         struct buffer_head *latest = NULL;
2730         struct btrfs_super_block *super;
2731         int i;
2732         u64 transid = 0;
2733         u64 bytenr;
2734
2735         /* we would like to check all the supers, but that would make
2736          * a btrfs mount succeed after a mkfs from a different FS.
2737          * So for now we only check the first super; scanning the later
2738          * supers (up to BTRFS_SUPER_MIRROR_MAX) would need a special mount option
2739          */
2740         for (i = 0; i < 1; i++) {
2741                 bytenr = btrfs_sb_offset(i);
2742                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2743                         break;
2744                 bh = __bread(bdev, bytenr / 4096, 4096);
2745                 if (!bh)
2746                         continue;
2747
2748                 super = (struct btrfs_super_block *)bh->b_data;
2749                 if (btrfs_super_bytenr(super) != bytenr ||
2750                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2751                             sizeof(super->magic))) {
2752                         brelse(bh);
2753                         continue;
2754                 }
2755
2756                 if (!latest || btrfs_super_generation(super) > transid) {
2757                         brelse(latest);
2758                         latest = bh;
2759                         transid = btrfs_super_generation(super);
2760                 } else {
2761                         brelse(bh);
2762                 }
2763         }
2764         return latest;
2765 }
2766
2767 /*
2768  * this should be called twice, once with wait == 0 and
2769  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2770  * we write are pinned.
2771  *
2772  * They are released when wait == 1 is done.
2773  * max_mirrors must be the same for both runs, and it indicates how
2774  * many supers on this one device should be written.
2775  *
2776  * max_mirrors == 0 means to write them all.
2777  */
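/*
 * write_all_supers() below drives both passes, roughly:
 *
 *	for each device:
 *		write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	for each device:
 *		write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 */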
2778 static int write_dev_supers(struct btrfs_device *device,
2779                             struct btrfs_super_block *sb,
2780                             int do_barriers, int wait, int max_mirrors)
2781 {
2782         struct buffer_head *bh;
2783         int i;
2784         int ret;
2785         int errors = 0;
2786         u32 crc;
2787         u64 bytenr;
2788
2789         if (max_mirrors == 0)
2790                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2791
2792         for (i = 0; i < max_mirrors; i++) {
2793                 bytenr = btrfs_sb_offset(i);
2794                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2795                         break;
2796
2797                 if (wait) {
2798                         bh = __find_get_block(device->bdev, bytenr / 4096,
2799                                               BTRFS_SUPER_INFO_SIZE);
2800                         BUG_ON(!bh);
2801                         wait_on_buffer(bh);
2802                         if (!buffer_uptodate(bh))
2803                                 errors++;
2804
2805                         /* drop our reference */
2806                         brelse(bh);
2807
2808                         /* drop the reference from the wait == 0 run */
2809                         brelse(bh);
2810                         continue;
2811                 } else {
2812                         btrfs_set_super_bytenr(sb, bytenr);
2813
2814                         crc = ~(u32)0;
2815                         crc = btrfs_csum_data(NULL, (char *)sb +
2816                                               BTRFS_CSUM_SIZE, crc,
2817                                               BTRFS_SUPER_INFO_SIZE -
2818                                               BTRFS_CSUM_SIZE);
2819                         btrfs_csum_final(crc, sb->csum);
2820
2821                         /*
2822                          * one reference for us, and we leave it for the
2823                          * caller
2824                          */
2825                         bh = __getblk(device->bdev, bytenr / 4096,
2826                                       BTRFS_SUPER_INFO_SIZE);
2827                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2828
2829                         /* one reference for submit_bh */
2830                         get_bh(bh);
2831
2832                         set_buffer_uptodate(bh);
2833                         lock_buffer(bh);
2834                         bh->b_end_io = btrfs_end_buffer_write_sync;
2835                         bh->b_private = device;
2836                 }
2837
2838                 /*
2839                  * we FUA the first super.  The others are allowed
2840                  * to go down lazily.
2841                  */
2842                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
2843                 if (ret)
2844                         errors++;
2845         }
2846         return errors < i ? 0 : -1;
2847 }
2848
2849 /*
2850  * endio for write_dev_flush; this wakes anyone waiting for the
2851  * barrier when it is done
2852  */
2853 static void btrfs_end_empty_barrier(struct bio *bio, int err)
2854 {
2855         if (err) {
2856                 if (err == -EOPNOTSUPP)
2857                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2858                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2859         }
2860         if (bio->bi_private)
2861                 complete(bio->bi_private);
2862         bio_put(bio);
2863 }
2864
2865 /*
2866  * trigger a flush for one of the devices.  If you pass wait == 0, the flush is
2867  * sent down.  With wait == 1, it waits for the previous flush to finish.
2868  *
2869  * any device where the flush fails with eopnotsupp is flagged as not
2870  * barrier-capable
2871  */
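/*
 * barrier_all_devices() below uses the two phases roughly like this:
 *
 *	list_for_each_entry_rcu(dev, head, dev_list)
 *		write_dev_flush(dev, 0);	send the flushes down
 *	list_for_each_entry_rcu(dev, head, dev_list)
 *		write_dev_flush(dev, 1);	wait for them to finish
 */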
2872 static int write_dev_flush(struct btrfs_device *device, int wait)
2873 {
2874         struct bio *bio;
2875         int ret = 0;
2876
2877         if (device->nobarriers)
2878                 return 0;
2879
2880         if (wait) {
2881                 bio = device->flush_bio;
2882                 if (!bio)
2883                         return 0;
2884
2885                 wait_for_completion(&device->flush_wait);
2886
2887                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2888                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
2889                                       rcu_str_deref(device->name));
2890                         device->nobarriers = 1;
2891                 }
2892                 if (!bio_flagged(bio, BIO_UPTODATE)) {
2893                         ret = -EIO;
2894                         if (!bio_flagged(bio, BIO_EOPNOTSUPP))
2895                                 btrfs_dev_stat_inc_and_print(device,
2896                                         BTRFS_DEV_STAT_FLUSH_ERRS);
2897                 }
2898
2899                 /* drop the reference from the wait == 0 run */
2900                 bio_put(bio);
2901                 device->flush_bio = NULL;
2902
2903                 return ret;
2904         }
2905
2906         /*
2907          * the bio_get() below takes an extra reference so the bio stays
2908          * around until the wait == 1 pass drops it
2909          */
2910         device->flush_bio = NULL;
2911         bio = bio_alloc(GFP_NOFS, 0);
2912         if (!bio)
2913                 return -ENOMEM;
2914
2915         bio->bi_end_io = btrfs_end_empty_barrier;
2916         bio->bi_bdev = device->bdev;
2917         init_completion(&device->flush_wait);
2918         bio->bi_private = &device->flush_wait;
2919         device->flush_bio = bio;
2920
2921         bio_get(bio);
2922         btrfsic_submit_bio(WRITE_FLUSH, bio);
2923
2924         return 0;
2925 }
2926
2927 /*
2928  * send an empty flush down to each device in parallel,
2929  * then wait for them
2930  */
2931 static int barrier_all_devices(struct btrfs_fs_info *info)
2932 {
2933         struct list_head *head;
2934         struct btrfs_device *dev;
2935         int errors = 0;
2936         int ret;
2937
2938         /* send down all the barriers */
2939         head = &info->fs_devices->devices;
2940         list_for_each_entry_rcu(dev, head, dev_list) {
2941                 if (!dev->bdev) {
2942                         errors++;
2943                         continue;
2944                 }
2945                 if (!dev->in_fs_metadata || !dev->writeable)
2946                         continue;
2947
2948                 ret = write_dev_flush(dev, 0);
2949                 if (ret)
2950                         errors++;
2951         }
2952
2953         /* wait for all the barriers */
2954         list_for_each_entry_rcu(dev, head, dev_list) {
2955                 if (!dev->bdev) {
2956                         errors++;
2957                         continue;
2958                 }
2959                 if (!dev->in_fs_metadata || !dev->writeable)
2960                         continue;
2961
2962                 ret = write_dev_flush(dev, 1);
2963                 if (ret)
2964                         errors++;
2965         }
2966         if (errors)
2967                 return -EIO;
2968         return 0;
2969 }
2970
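/*
 * write the super block (with the per-device dev_item filled in) to every
 * writeable device that holds fs metadata.  Up to max_errors failed devices
 * are tolerated; more than that is treated as a fatal error.
 */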
2971 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2972 {
2973         struct list_head *head;
2974         struct btrfs_device *dev;
2975         struct btrfs_super_block *sb;
2976         struct btrfs_dev_item *dev_item;
2977         int ret;
2978         int do_barriers;
2979         int max_errors;
2980         int total_errors = 0;
2981         u64 flags;
2982
2983         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
2984         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2985         backup_super_roots(root->fs_info);
2986
2987         sb = root->fs_info->super_for_commit;
2988         dev_item = &sb->dev_item;
2989
2990         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2991         head = &root->fs_info->fs_devices->devices;
2992
2993         if (do_barriers)
2994                 barrier_all_devices(root->fs_info);
2995
2996         list_for_each_entry_rcu(dev, head, dev_list) {
2997                 if (!dev->bdev) {
2998                         total_errors++;
2999                         continue;
3000                 }
3001                 if (!dev->in_fs_metadata || !dev->writeable)
3002                         continue;
3003
3004                 btrfs_set_stack_device_generation(dev_item, 0);
3005                 btrfs_set_stack_device_type(dev_item, dev->type);
3006                 btrfs_set_stack_device_id(dev_item, dev->devid);
3007                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3008                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3009                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3010                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3011                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3012                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3013                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3014
3015                 flags = btrfs_super_flags(sb);
3016                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3017
3018                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3019                 if (ret)
3020                         total_errors++;
3021         }
3022         if (total_errors > max_errors) {
3023                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3024                        total_errors);
3025
3026                 /* This shouldn't happen. FUA is masked off if unsupported */
3027                 BUG();
3028         }
3029
3030         total_errors = 0;
3031         list_for_each_entry_rcu(dev, head, dev_list) {
3032                 if (!dev->bdev)
3033                         continue;
3034                 if (!dev->in_fs_metadata || !dev->writeable)
3035                         continue;
3036
3037                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3038                 if (ret)
3039                         total_errors++;
3040         }
3041         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3042         if (total_errors > max_errors) {
3043                 btrfs_error(root->fs_info, -EIO,
3044                             "%d errors while writing supers", total_errors);
3045                 return -EIO;
3046         }
3047         return 0;
3048 }
3049
3050 int write_ctree_super(struct btrfs_trans_handle *trans,
3051                       struct btrfs_root *root, int max_mirrors)
3052 {
3053         int ret;
3054
3055         ret = write_all_supers(root, max_mirrors);
3056         return ret;
3057 }
3058
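/*
 * remove a subvolume root from the radix tree and free it, waiting for any
 * srcu readers first when the root item has no references left
 */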
3059 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3060 {
3061         spin_lock(&fs_info->fs_roots_radix_lock);
3062         radix_tree_delete(&fs_info->fs_roots_radix,
3063                           (unsigned long)root->root_key.objectid);
3064         spin_unlock(&fs_info->fs_roots_radix_lock);
3065
3066         if (btrfs_root_refs(&root->root_item) == 0)
3067                 synchronize_srcu(&fs_info->subvol_srcu);
3068
3069         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3070         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3071         free_fs_root(root);
3072 }
3073
3074 static void free_fs_root(struct btrfs_root *root)
3075 {
3076         iput(root->cache_inode);
3077         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3078         if (root->anon_dev)
3079                 free_anon_bdev(root->anon_dev);
3080         free_extent_buffer(root->node);
3081         free_extent_buffer(root->commit_root);
3082         kfree(root->free_ino_ctl);
3083         kfree(root->free_ino_pinned);
3084         kfree(root->name);
3085         kfree(root);
3086 }
3087
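/* free every root on the dead list, then everything left in the radix tree */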
3088 static void del_fs_roots(struct btrfs_fs_info *fs_info)
3089 {
3090         int ret;
3091         struct btrfs_root *gang[8];
3092         int i;
3093
3094         while (!list_empty(&fs_info->dead_roots)) {
3095                 gang[0] = list_entry(fs_info->dead_roots.next,
3096                                      struct btrfs_root, root_list);
3097                 list_del(&gang[0]->root_list);
3098
3099                 if (gang[0]->in_radix) {
3100                         btrfs_free_fs_root(fs_info, gang[0]);
3101                 } else {
3102                         free_extent_buffer(gang[0]->node);
3103                         free_extent_buffer(gang[0]->commit_root);
3104                         kfree(gang[0]);
3105                 }
3106         }
3107
3108         while (1) {
3109                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3110                                              (void **)gang, 0,
3111                                              ARRAY_SIZE(gang));
3112                 if (!ret)
3113                         break;
3114                 for (i = 0; i < ret; i++)
3115                         btrfs_free_fs_root(fs_info, gang[i]);
3116         }
3117 }
3118
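/* run orphan cleanup on every fs root currently in the radix tree */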
3119 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3120 {
3121         u64 root_objectid = 0;
3122         struct btrfs_root *gang[8];
3123         int i;
3124         int ret;
3125
3126         while (1) {
3127                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3128                                              (void **)gang, root_objectid,
3129                                              ARRAY_SIZE(gang));
3130                 if (!ret)
3131                         break;
3132
3133                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3134                 for (i = 0; i < ret; i++) {
3135                         int err;
3136
3137                         root_objectid = gang[i]->root_key.objectid;
3138                         err = btrfs_orphan_cleanup(gang[i]);
3139                         if (err)
3140                                 return err;
3141                 }
3142                 root_objectid++;
3143         }
3144         return 0;
3145 }
3146
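/*
 * run the cleaner work, commit the running transaction twice (the second
 * commit drops the snapshot left behind by the first) and write the supers
 */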
3147 int btrfs_commit_super(struct btrfs_root *root)
3148 {
3149         struct btrfs_trans_handle *trans;
3150         int ret;
3151
3152         mutex_lock(&root->fs_info->cleaner_mutex);
3153         btrfs_run_delayed_iputs(root);
3154         btrfs_clean_old_snapshots(root);
3155         mutex_unlock(&root->fs_info->cleaner_mutex);
3156
3157         /* wait until ongoing cleanup work is done */
3158         down_write(&root->fs_info->cleanup_work_sem);
3159         up_write(&root->fs_info->cleanup_work_sem);
3160
3161         trans = btrfs_join_transaction(root);
3162         if (IS_ERR(trans))
3163                 return PTR_ERR(trans);
3164         ret = btrfs_commit_transaction(trans, root);
3165         if (ret)
3166                 return ret;
3167         /* run commit again to drop the original snapshot */
3168         trans = btrfs_join_transaction(root);
3169         if (IS_ERR(trans))
3170                 return PTR_ERR(trans);
3171         ret = btrfs_commit_transaction(trans, root);
3172         if (ret)
3173                 return ret;
3174         ret = btrfs_write_and_wait_transaction(NULL, root);
3175         if (ret) {
3176                 btrfs_error(root->fs_info, ret,
3177                             "Failed to sync btree inode to disk.");
3178                 return ret;
3179         }
3180
3181         ret = write_ctree_super(NULL, root, 0);
3182         return ret;
3183 }
3184
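/*
 * unmount path: stop the helper kthreads and worker pools, commit any
 * outstanding metadata and free all in-memory filesystem state
 */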
3185 int close_ctree(struct btrfs_root *root)
3186 {
3187         struct btrfs_fs_info *fs_info = root->fs_info;
3188         int ret;
3189
3190         fs_info->closing = 1;
3191         smp_mb();
3192
3193         /* pause restriper - we want to resume on mount */
3194         btrfs_pause_balance(root->fs_info);
3195
3196         btrfs_scrub_cancel(root);
3197
3198         /* wait for any defraggers to finish */
3199         wait_event(fs_info->transaction_wait,
3200                    (atomic_read(&fs_info->defrag_running) == 0));
3201
3202         /* clear out the rbtree of defraggable inodes */
3203         btrfs_run_defrag_inodes(fs_info);
3204
3205         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3206                 ret = btrfs_commit_super(root);
3207                 if (ret)
3208                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3209         }
3210
3211         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
3212                 btrfs_error_commit_super(root);
3213
3214         btrfs_put_block_group_cache(fs_info);
3215
3216         kthread_stop(fs_info->transaction_kthread);
3217         kthread_stop(fs_info->cleaner_kthread);
3218
3219         fs_info->closing = 2;
3220         smp_mb();
3221
3222         btrfs_free_qgroup_config(root->fs_info);
3223
3224         if (fs_info->delalloc_bytes) {
3225                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
3226                        (unsigned long long)fs_info->delalloc_bytes);
3227         }
3228
3229         free_extent_buffer(fs_info->extent_root->node);
3230         free_extent_buffer(fs_info->extent_root->commit_root);
3231         free_extent_buffer(fs_info->tree_root->node);
3232         free_extent_buffer(fs_info->tree_root->commit_root);
3233         free_extent_buffer(fs_info->chunk_root->node);
3234         free_extent_buffer(fs_info->chunk_root->commit_root);
3235         free_extent_buffer(fs_info->dev_root->node);
3236         free_extent_buffer(fs_info->dev_root->commit_root);
3237         free_extent_buffer(fs_info->csum_root->node);
3238         free_extent_buffer(fs_info->csum_root->commit_root);
3239         if (fs_info->quota_root) {
3240                 free_extent_buffer(fs_info->quota_root->node);
3241                 free_extent_buffer(fs_info->quota_root->commit_root);
3242         }
3243
3244         btrfs_free_block_groups(fs_info);
3245
3246         del_fs_roots(fs_info);
3247
3248         iput(fs_info->btree_inode);
3249
3250         btrfs_stop_workers(&fs_info->generic_worker);
3251         btrfs_stop_workers(&fs_info->fixup_workers);
3252         btrfs_stop_workers(&fs_info->delalloc_workers);
3253         btrfs_stop_workers(&fs_info->workers);
3254         btrfs_stop_workers(&fs_info->endio_workers);
3255         btrfs_stop_workers(&fs_info->endio_meta_workers);
3256         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
3257         btrfs_stop_workers(&fs_info->endio_write_workers);
3258         btrfs_stop_workers(&fs_info->endio_freespace_worker);
3259         btrfs_stop_workers(&fs_info->submit_workers);
3260         btrfs_stop_workers(&fs_info->delayed_workers);
3261         btrfs_stop_workers(&fs_info->caching_workers);
3262         btrfs_stop_workers(&fs_info->readahead_workers);
3263
3264 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3265         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3266                 btrfsic_unmount(root, fs_info->fs_devices);
3267 #endif
3268
3269         btrfs_close_devices(fs_info->fs_devices);
3270         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3271
3272         bdi_destroy(&fs_info->bdi);
3273         cleanup_srcu_struct(&fs_info->subvol_srcu);
3274
3275         return 0;
3276 }
3277
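/*
 * check that a metadata buffer is uptodate and that its generation matches
 * parent_transid; may return -EAGAIN if atomic is set and the check would
 * have to block
 */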
3278 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3279                           int atomic)
3280 {
3281         int ret;
3282         struct inode *btree_inode = buf->pages[0]->mapping->host;
3283
3284         ret = extent_buffer_uptodate(buf);
3285         if (!ret)
3286                 return ret;
3287
3288         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3289                                     parent_transid, atomic);
3290         if (ret == -EAGAIN)
3291                 return ret;
3292         return !ret;
3293 }
3294
3295 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3296 {
3297         return set_extent_buffer_uptodate(buf);
3298 }
3299
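/*
 * mark a tree block dirty, warning if its generation does not match the
 * running transaction, and account the newly dirtied bytes in
 * dirty_metadata_bytes
 */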
3300 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3301 {
3302         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3303         u64 transid = btrfs_header_generation(buf);
3304         int was_dirty;
3305
3306         btrfs_assert_tree_locked(buf);
3307         if (transid != root->fs_info->generation) {
3308                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
3309                        "found %llu running %llu\n",
3310                         (unsigned long long)buf->start,
3311                         (unsigned long long)transid,
3312                         (unsigned long long)root->fs_info->generation);
3313                 WARN_ON(1);
3314         }
3315         was_dirty = set_extent_buffer_dirty(buf);
3316         if (!was_dirty) {
3317                 spin_lock(&root->fs_info->delalloc_lock);
3318                 root->fs_info->dirty_metadata_bytes += buf->len;
3319                 spin_unlock(&root->fs_info->delalloc_lock);
3320         }
3321 }
3322
3323 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3324 {
3325         /*
3326          * looks as though older kernels can get into trouble with
3327          * this code; they end up stuck in balance_dirty_pages forever
3328          */
3329         u64 num_dirty;
3330         unsigned long thresh = 32 * 1024 * 1024;
3331
3332         if (current->flags & PF_MEMALLOC)
3333                 return;
3334
3335         btrfs_balance_delayed_items(root);
3336
3337         num_dirty = root->fs_info->dirty_metadata_bytes;
3338
3339         if (num_dirty > thresh) {
3340                 balance_dirty_pages_ratelimited_nr(
3341                                    root->fs_info->btree_inode->i_mapping, 1);
3342         }
3343         return;
3344 }
3345
3346 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3347 {
3348         /*
3349          * looks as though older kernels can get into trouble with
3350          * this code; they end up stuck in balance_dirty_pages forever
3351          */
3352         u64 num_dirty;
3353         unsigned long thresh = 32 * 1024 * 1024;
3354
3355         if (current->flags & PF_MEMALLOC)
3356                 return;
3357
3358         num_dirty = root->fs_info->dirty_metadata_bytes;
3359
3360         if (num_dirty > thresh) {
3361                 balance_dirty_pages_ratelimited_nr(
3362                                    root->fs_info->btree_inode->i_mapping, 1);
3363         }
3364         return;
3365 }
3366
3367 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3368 {
3369         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3370         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3371 }
3372
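/*
 * sanity check the super block before it is used; right now this only
 * rejects unsupported checksum types
 */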
3373 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3374                               int read_only)
3375 {
3376         if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
3377                 printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
3378                 return -EINVAL;
3379         }
3380
3381         if (read_only)
3382                 return 0;
3383
3384         return 0;
3385 }
3386
3387 void btrfs_error_commit_super(struct btrfs_root *root)
3388 {
3389         mutex_lock(&root->fs_info->cleaner_mutex);
3390         btrfs_run_delayed_iputs(root);
3391         mutex_unlock(&root->fs_info->cleaner_mutex);
3392
3393         down_write(&root->fs_info->cleanup_work_sem);
3394         up_write(&root->fs_info->cleanup_work_sem);
3395
3396         /* cleanup FS via transaction */
3397         btrfs_cleanup_transaction(root);
3398 }
3399
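/*
 * the btrfs_destroy_* helpers below are used on the error shutdown path
 * (btrfs_cleanup_transaction); they tear down in-memory state without
 * writing anything back to disk
 */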
3400 static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3401 {
3402         struct btrfs_inode *btrfs_inode;
3403         struct list_head splice;
3404
3405         INIT_LIST_HEAD(&splice);
3406
3407         mutex_lock(&root->fs_info->ordered_operations_mutex);
3408         spin_lock(&root->fs_info->ordered_extent_lock);
3409
3410         list_splice_init(&root->fs_info->ordered_operations, &splice);
3411         while (!list_empty(&splice)) {
3412                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3413                                          ordered_operations);
3414
3415                 list_del_init(&btrfs_inode->ordered_operations);
3416
3417                 btrfs_invalidate_inodes(btrfs_inode->root);
3418         }
3419
3420         spin_unlock(&root->fs_info->ordered_extent_lock);
3421         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3422 }
3423
3424 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3425 {
3426         struct list_head splice;
3427         struct btrfs_ordered_extent *ordered;
3428         struct inode *inode;
3429
3430         INIT_LIST_HEAD(&splice);
3431
3432         spin_lock(&root->fs_info->ordered_extent_lock);
3433
3434         list_splice_init(&root->fs_info->ordered_extents, &splice);
3435         while (!list_empty(&splice)) {
3436                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
3437                                      root_extent_list);
3438
3439                 list_del_init(&ordered->root_extent_list);
3440                 atomic_inc(&ordered->refs);
3441
3442                 /* the inode may be getting freed (in sys_unlink path). */
3443                 inode = igrab(ordered->inode);
3444
3445                 spin_unlock(&root->fs_info->ordered_extent_lock);
3446                 if (inode)
3447                         iput(inode);
3448
3449                 atomic_set(&ordered->refs, 1);
3450                 btrfs_put_ordered_extent(ordered);
3451
3452                 spin_lock(&root->fs_info->ordered_extent_lock);
3453         }
3454
3455         spin_unlock(&root->fs_info->ordered_extent_lock);
3456 }
3457
3458 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3459                                struct btrfs_root *root)
3460 {
3461         struct rb_node *node;
3462         struct btrfs_delayed_ref_root *delayed_refs;
3463         struct btrfs_delayed_ref_node *ref;
3464         int ret = 0;
3465
3466         delayed_refs = &trans->delayed_refs;
3467
3468         spin_lock(&delayed_refs->lock);
3469         if (delayed_refs->num_entries == 0) {
3470                 spin_unlock(&delayed_refs->lock);
3471                 printk(KERN_INFO "delayed_refs has NO entry\n");
3472                 return ret;
3473         }
3474
3475         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3476                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3477
3478                 atomic_set(&ref->refs, 1);
3479                 if (btrfs_delayed_ref_is_head(ref)) {
3480                         struct btrfs_delayed_ref_head *head;
3481
3482                         head = btrfs_delayed_node_to_head(ref);
3483                         if (!mutex_trylock(&head->mutex)) {
3484                                 atomic_inc(&ref->refs);
3485                                 spin_unlock(&delayed_refs->lock);
3486
3487                                 /* Need to wait for the delayed ref to run */
3488                                 mutex_lock(&head->mutex);
3489                                 mutex_unlock(&head->mutex);
3490                                 btrfs_put_delayed_ref(ref);
3491
3492                                 spin_lock(&delayed_refs->lock);
3493                                 continue;
3494                         }
3495
3496                         kfree(head->extent_op);
3497                         delayed_refs->num_heads--;
3498                         if (list_empty(&head->cluster))
3499                                 delayed_refs->num_heads_ready--;
3500                         list_del_init(&head->cluster);
3501                 }
3502                 ref->in_tree = 0;
3503                 rb_erase(&ref->rb_node, &delayed_refs->root);
3504                 delayed_refs->num_entries--;
3505
3506                 spin_unlock(&delayed_refs->lock);
3507                 btrfs_put_delayed_ref(ref);
3508
3509                 cond_resched();
3510                 spin_lock(&delayed_refs->lock);
3511         }
3512
3513         spin_unlock(&delayed_refs->lock);
3514
3515         return ret;
3516 }
3517
3518 static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3519 {
3520         struct btrfs_pending_snapshot *snapshot;
3521         struct list_head splice;
3522
3523         INIT_LIST_HEAD(&splice);
3524
3525         list_splice_init(&t->pending_snapshots, &splice);
3526
3527         while (!list_empty(&splice)) {
3528                 snapshot = list_entry(splice.next,
3529                                       struct btrfs_pending_snapshot,
3530                                       list);
3531
3532                 list_del_init(&snapshot->list);
3533
3534                 kfree(snapshot);
3535         }
3536 }
3537
3538 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3539 {
3540         struct btrfs_inode *btrfs_inode;
3541         struct list_head splice;
3542
3543         INIT_LIST_HEAD(&splice);
3544
3545         spin_lock(&root->fs_info->delalloc_lock);
3546         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3547
3548         while (!list_empty(&splice)) {
3549                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3550                                     delalloc_inodes);
3551
3552                 list_del_init(&btrfs_inode->delalloc_inodes);
3553
3554                 btrfs_invalidate_inodes(btrfs_inode->root);
3555         }
3556
3557         spin_unlock(&root->fs_info->delalloc_lock);
3558 }
3559
3560 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3561                                         struct extent_io_tree *dirty_pages,
3562                                         int mark)
3563 {
3564         int ret;
3565         struct page *page;
3566         struct inode *btree_inode = root->fs_info->btree_inode;
3567         struct extent_buffer *eb;
3568         u64 start = 0;
3569         u64 end;
3570         u64 offset;
3571         unsigned long index;
3572
3573         while (1) {
3574                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3575                                             mark);
3576                 if (ret)
3577                         break;
3578
3579                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3580                 while (start <= end) {
3581                         index = start >> PAGE_CACHE_SHIFT;
3582                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
3583                         page = find_get_page(btree_inode->i_mapping, index);
3584                         if (!page)
3585                                 continue;
3586                         offset = page_offset(page);
3587
3588                         spin_lock(&dirty_pages->buffer_lock);
3589                         eb = radix_tree_lookup(
3590                              &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
3591                                                offset >> PAGE_CACHE_SHIFT);
3592                         spin_unlock(&dirty_pages->buffer_lock);
3593                         if (eb)
3594                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3595                                                          &eb->bflags);
3596                         if (PageWriteback(page))
3597                                 end_page_writeback(page);
3598
3599                         lock_page(page);
3600                         if (PageDirty(page)) {
3601                                 clear_page_dirty_for_io(page);
3602                                 spin_lock_irq(&page->mapping->tree_lock);
3603                                 radix_tree_tag_clear(&page->mapping->page_tree,
3604                                                         page_index(page),
3605                                                         PAGECACHE_TAG_DIRTY);
3606                                 spin_unlock_irq(&page->mapping->tree_lock);
3607                         }
3608
3609                         unlock_page(page);
3610                         page_cache_release(page);
3611                 }
3612         }
3613
3614         return ret;
3615 }
3616
3617 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3618                                        struct extent_io_tree *pinned_extents)
3619 {
3620         struct extent_io_tree *unpin;
3621         u64 start;
3622         u64 end;
3623         int ret;
3624         bool loop = true;
3625
3626         unpin = pinned_extents;
3627 again:
3628         while (1) {
3629                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3630                                             EXTENT_DIRTY);
3631                 if (ret)
3632                         break;
3633
3634                 /* discard the range if the DISCARD mount option is set */
3635                 if (btrfs_test_opt(root, DISCARD))
3636                         ret = btrfs_error_discard_extent(root, start,
3637                                                          end + 1 - start,
3638                                                          NULL);
3639
3640                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3641                 btrfs_error_unpin_extent_range(root, start, end);
3642                 cond_resched();
3643         }
3644
3645         if (loop) {
3646                 if (unpin == &root->fs_info->freed_extents[0])
3647                         unpin = &root->fs_info->freed_extents[1];
3648                 else
3649                         unpin = &root->fs_info->freed_extents[0];
3650                 loop = false;
3651                 goto again;
3652         }
3653
3654         return 0;
3655 }
3656
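/*
 * abort a single transaction: release its block reservation, wake anyone
 * waiting on the commit as if it had finished, and drop the dirty and
 * pinned extents without writing them
 */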
3657 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3658                                    struct btrfs_root *root)
3659 {
3660         btrfs_destroy_delayed_refs(cur_trans, root);
3661         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3662                                 cur_trans->dirty_pages.dirty_bytes);
3663
3664         /* FIXME: cleanup wait for commit */
3665         cur_trans->in_commit = 1;
3666         cur_trans->blocked = 1;
3667         wake_up(&root->fs_info->transaction_blocked_wait);
3668
3669         cur_trans->blocked = 0;
3670         wake_up(&root->fs_info->transaction_wait);
3671
3672         cur_trans->commit_done = 1;
3673         wake_up(&cur_trans->commit_wait);
3674
3675         btrfs_destroy_delayed_inodes(root);
3676         btrfs_assert_delayed_root_empty(root);
3677
3678         btrfs_destroy_pending_snapshots(cur_trans);
3679
3680         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3681                                      EXTENT_DIRTY);
3682         btrfs_destroy_pinned_extent(root,
3683                                     root->fs_info->pinned_extents);
3684
3685         /*
3686         memset(cur_trans, 0, sizeof(*cur_trans));
3687         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3688         */
3689 }
3690
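/*
 * walk every open transaction and destroy it; called when the filesystem
 * hits a fatal error and has to shut down without committing
 */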
3691 int btrfs_cleanup_transaction(struct btrfs_root *root)
3692 {
3693         struct btrfs_transaction *t;
3694         LIST_HEAD(list);
3695
3696         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3697
3698         spin_lock(&root->fs_info->trans_lock);
3699         list_splice_init(&root->fs_info->trans_list, &list);
3700         root->fs_info->trans_no_join = 1;
3701         spin_unlock(&root->fs_info->trans_lock);
3702
3703         while (!list_empty(&list)) {
3704                 t = list_entry(list.next, struct btrfs_transaction, list);
3705                 if (!t)
3706                         break;
3707
3708                 btrfs_destroy_ordered_operations(root);
3709
3710                 btrfs_destroy_ordered_extents(root);
3711
3712                 btrfs_destroy_delayed_refs(t, root);
3713
3714                 btrfs_block_rsv_release(root,
3715                                         &root->fs_info->trans_block_rsv,
3716                                         t->dirty_pages.dirty_bytes);
3717
3718                 /* FIXME: cleanup wait for commit */
3719                 t->in_commit = 1;
3720                 t->blocked = 1;
3721                 smp_mb();
3722                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3723                         wake_up(&root->fs_info->transaction_blocked_wait);
3724
3725                 t->blocked = 0;
3726                 smp_mb();
3727                 if (waitqueue_active(&root->fs_info->transaction_wait))
3728                         wake_up(&root->fs_info->transaction_wait);
3729
3730                 t->commit_done = 1;
3731                 smp_mb();
3732                 if (waitqueue_active(&t->commit_wait))
3733                         wake_up(&t->commit_wait);
3734
3735                 btrfs_destroy_delayed_inodes(root);
3736                 btrfs_assert_delayed_root_empty(root);
3737
3738                 btrfs_destroy_pending_snapshots(t);
3739
3740                 btrfs_destroy_delalloc_inodes(root);
3741
3742                 spin_lock(&root->fs_info->trans_lock);
3743                 root->fs_info->running_transaction = NULL;
3744                 spin_unlock(&root->fs_info->trans_lock);
3745
3746                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3747                                              EXTENT_DIRTY);
3748
3749                 btrfs_destroy_pinned_extent(root,
3750                                             root->fs_info->pinned_extents);
3751
3752                 atomic_set(&t->use_count, 0);
3753                 list_del_init(&t->list);
3754                 memset(t, 0, sizeof(*t));
3755                 kmem_cache_free(btrfs_transaction_cachep, t);
3756         }
3757
3758         spin_lock(&root->fs_info->trans_lock);
3759         root->fs_info->trans_no_join = 0;
3760         spin_unlock(&root->fs_info->trans_lock);
3761         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3762
3763         return 0;
3764 }
3765
3766 static struct extent_io_ops btree_extent_io_ops = {
3767         .readpage_end_io_hook = btree_readpage_end_io_hook,
3768         .readpage_io_failed_hook = btree_io_failed_hook,
3769         .submit_bio_hook = btree_submit_bio_hook,
3770         /* note we're sharing with inode.c for the merge bio hook */
3771         .merge_bio_hook = btrfs_merge_bio_hook,
3772 };