Btrfs: use percpu counter for fs_info->delalloc_bytes
[linux-2.6-block.git] fs/btrfs/disk-io.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
34 #include "compat.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47 #include "rcu-string.h"
48 #include "dev-replace.h"
49
50 #ifdef CONFIG_X86
51 #include <asm/cpufeature.h>
52 #endif
53
54 static struct extent_io_ops btree_extent_io_ops;
55 static void end_workqueue_fn(struct btrfs_work *work);
56 static void free_fs_root(struct btrfs_root *root);
57 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
58                                     int read_only);
59 static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
60 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
61 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
62                                       struct btrfs_root *root);
63 static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
64 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
65 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
66                                         struct extent_io_tree *dirty_pages,
67                                         int mark);
68 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
69                                        struct extent_io_tree *pinned_extents);
70
71 /*
72  * end_io_wq structs are used to do processing in task context when an IO is
73  * complete.  This is used during reads to verify checksums, and it is used
74  * by writes to insert metadata for new file extents after IO is complete.
75  */
76 struct end_io_wq {
77         struct bio *bio;
78         bio_end_io_t *end_io;
79         void *private;
80         struct btrfs_fs_info *info;
81         int error;
82         int metadata;
83         struct list_head list;
84         struct btrfs_work work;
85 };
86
87 /*
88  * async submit bios are used to offload expensive checksumming
89  * onto the worker threads.  They checksum file and metadata bios
90  * just before they are sent down the IO stack.
91  */
92 struct async_submit_bio {
93         struct inode *inode;
94         struct bio *bio;
95         struct list_head list;
96         extent_submit_bio_hook_t *submit_bio_start;
97         extent_submit_bio_hook_t *submit_bio_done;
98         int rw;
99         int mirror_num;
100         unsigned long bio_flags;
101         /*
102          * bio_offset is optional, can be used if the pages in the bio
103          * can't tell us where in the file the bio should go
104          */
105         u64 bio_offset;
106         struct btrfs_work work;
107         int error;
108 };
109
110 /*
111  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
112  * eb, the lockdep key is determined by the btrfs_root it belongs to and
113  * the level the eb occupies in the tree.
114  *
115  * Different roots are used for different purposes and may nest inside each
116  * other, so they require separate keysets.  As lockdep keys should be
117  * static, assign keysets according to the purpose of the root as indicated
118  * by btrfs_root->objectid.  This ensures that all special purpose roots
119  * have separate keysets.
120  *
121  * Lock-nesting across peer nodes is always done with the immediate parent
122  * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
123  * subclass to avoid triggering lockdep warning in such cases.
124  *
125  * The key is set by the readpage_end_io_hook after the buffer has passed
126  * csum validation but before the pages are unlocked.  It is also set by
127  * btrfs_init_new_buffer on freshly allocated blocks.
128  *
129  * We also add a check to make sure the highest level of the tree is the
130  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
131  * needs updating as well.
132  */
133 #ifdef CONFIG_DEBUG_LOCK_ALLOC
134 # if BTRFS_MAX_LEVEL != 8
135 #  error
136 # endif
137
138 static struct btrfs_lockdep_keyset {
139         u64                     id;             /* root objectid */
140         const char              *name_stem;     /* lock name stem */
141         char                    names[BTRFS_MAX_LEVEL + 1][20];
142         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
143 } btrfs_lockdep_keysets[] = {
144         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
145         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
146         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
147         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
148         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
149         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
150         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
151         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
152         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
153         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
154         { .id = 0,                              .name_stem = "tree"     },
155 };
156
157 void __init btrfs_init_lockdep(void)
158 {
159         int i, j;
160
161         /* initialize lockdep class names */
162         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
163                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
164
165                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
166                         snprintf(ks->names[j], sizeof(ks->names[j]),
167                                  "btrfs-%s-%02d", ks->name_stem, j);
168         }
169 }
170
171 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
172                                     int level)
173 {
174         struct btrfs_lockdep_keyset *ks;
175
176         BUG_ON(level >= ARRAY_SIZE(ks->keys));
177
178         /* find the matching keyset, id 0 is the default entry */
179         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
180                 if (ks->id == objectid)
181                         break;
182
183         lockdep_set_class_and_name(&eb->lock,
184                                    &ks->keys[level], ks->names[level]);
185 }
186
187 #endif
188
189 /*
190  * extents on the btree inode are pretty simple, there's one extent
191  * that covers the entire device
192  */
193 static struct extent_map *btree_get_extent(struct inode *inode,
194                 struct page *page, size_t pg_offset, u64 start, u64 len,
195                 int create)
196 {
197         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
198         struct extent_map *em;
199         int ret;
200
201         read_lock(&em_tree->lock);
202         em = lookup_extent_mapping(em_tree, start, len);
203         if (em) {
204                 em->bdev =
205                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
206                 read_unlock(&em_tree->lock);
207                 goto out;
208         }
209         read_unlock(&em_tree->lock);
210
211         em = alloc_extent_map();
212         if (!em) {
213                 em = ERR_PTR(-ENOMEM);
214                 goto out;
215         }
216         em->start = 0;
217         em->len = (u64)-1;
218         em->block_len = (u64)-1;
219         em->block_start = 0;
220         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
221
222         write_lock(&em_tree->lock);
223         ret = add_extent_mapping(em_tree, em);
224         if (ret == -EEXIST) {
225                 free_extent_map(em);
226                 em = lookup_extent_mapping(em_tree, start, len);
227                 if (!em)
228                         em = ERR_PTR(-EIO);
229         } else if (ret) {
230                 free_extent_map(em);
231                 em = ERR_PTR(ret);
232         }
233         write_unlock(&em_tree->lock);
234
235 out:
236         return em;
237 }
238
239 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
240 {
241         return crc32c(seed, data, len);
242 }
243
244 void btrfs_csum_final(u32 crc, char *result)
245 {
246         put_unaligned_le32(~crc, result);
247 }
248
249 /*
250  * compute the csum for a btree block, and either verify it or write it
251  * into the csum field of the block.
252  */
253 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
254                            int verify)
255 {
256         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
257         char *result = NULL;
258         unsigned long len;
259         unsigned long cur_len;
260         unsigned long offset = BTRFS_CSUM_SIZE;
261         char *kaddr;
262         unsigned long map_start;
263         unsigned long map_len;
264         int err;
265         u32 crc = ~(u32)0;
266         unsigned long inline_result;
267
268         len = buf->len - offset;
269         while (len > 0) {
270                 err = map_private_extent_buffer(buf, offset, 32,
271                                         &kaddr, &map_start, &map_len);
272                 if (err)
273                         return 1;
274                 cur_len = min(len, map_len - (offset - map_start));
275                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
276                                       crc, cur_len);
277                 len -= cur_len;
278                 offset += cur_len;
279         }
280         if (csum_size > sizeof(inline_result)) {
281                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
282                 if (!result)
283                         return 1;
284         } else {
285                 result = (char *)&inline_result;
286         }
287
288         btrfs_csum_final(crc, result);
289
290         if (verify) {
291                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
292                         u32 val;
293                         u32 found = 0;
294                         memcpy(&found, result, csum_size);
295
296                         read_extent_buffer(buf, &val, 0, csum_size);
297                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
298                                        "failed on %llu wanted %X found %X "
299                                        "level %d\n",
300                                        root->fs_info->sb->s_id,
301                                        (unsigned long long)buf->start, val, found,
302                                        btrfs_header_level(buf));
303                         if (result != (char *)&inline_result)
304                                 kfree(result);
305                         return 1;
306                 }
307         } else {
308                 write_extent_buffer(buf, result, 0, csum_size);
309         }
310         if (result != (char *)&inline_result)
311                 kfree(result);
312         return 0;
313 }
314
315 /*
316  * we can't consider a given block up to date unless the transid of the
317  * block matches the transid in the parent node's pointer.  This is how we
318  * detect blocks that either didn't get written at all or got written
319  * in the wrong place.
320  */
321 static int verify_parent_transid(struct extent_io_tree *io_tree,
322                                  struct extent_buffer *eb, u64 parent_transid,
323                                  int atomic)
324 {
325         struct extent_state *cached_state = NULL;
326         int ret;
327
328         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
329                 return 0;
330
331         if (atomic)
332                 return -EAGAIN;
333
334         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
335                          0, &cached_state);
336         if (extent_buffer_uptodate(eb) &&
337             btrfs_header_generation(eb) == parent_transid) {
338                 ret = 0;
339                 goto out;
340         }
341         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
342                        "found %llu\n",
343                        (unsigned long long)eb->start,
344                        (unsigned long long)parent_transid,
345                        (unsigned long long)btrfs_header_generation(eb));
346         ret = 1;
347         clear_extent_buffer_uptodate(eb);
348 out:
349         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
350                              &cached_state, GFP_NOFS);
351         return ret;
352 }
353
354 /*
355  * helper to read a given tree block, doing retries as required when
356  * the checksums don't match and we have alternate mirrors to try.
357  */
358 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
359                                           struct extent_buffer *eb,
360                                           u64 start, u64 parent_transid)
361 {
362         struct extent_io_tree *io_tree;
363         int failed = 0;
364         int ret;
365         int num_copies = 0;
366         int mirror_num = 0;
367         int failed_mirror = 0;
368
369         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
370         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
371         while (1) {
372                 ret = read_extent_buffer_pages(io_tree, eb, start,
373                                                WAIT_COMPLETE,
374                                                btree_get_extent, mirror_num);
375                 if (!ret) {
376                         if (!verify_parent_transid(io_tree, eb,
377                                                    parent_transid, 0))
378                                 break;
379                         else
380                                 ret = -EIO;
381                 }
382
383                 /*
384                  * This buffer's crc is fine, but its contents are corrupted, so
385                  * there is no reason to read the other copies, they won't be
386                  * any less wrong.
387                  */
388                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
389                         break;
390
391                 num_copies = btrfs_num_copies(root->fs_info,
392                                               eb->start, eb->len);
393                 if (num_copies == 1)
394                         break;
395
396                 if (!failed_mirror) {
397                         failed = 1;
398                         failed_mirror = eb->read_mirror;
399                 }
400
401                 mirror_num++;
402                 if (mirror_num == failed_mirror)
403                         mirror_num++;
404
405                 if (mirror_num > num_copies)
406                         break;
407         }
408
409         if (failed && !ret && failed_mirror)
410                 repair_eb_io_failure(root, eb, failed_mirror);
411
412         return ret;
413 }
414
415 /*
416  * checksum a dirty tree block before IO.  This has extra checks to make sure
417  * we only fill in the checksum field in the first page of a multi-page block
418  */
419
420 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
421 {
422         struct extent_io_tree *tree;
423         u64 start = page_offset(page);
424         u64 found_start;
425         struct extent_buffer *eb;
426
427         tree = &BTRFS_I(page->mapping->host)->io_tree;
428
429         eb = (struct extent_buffer *)page->private;
430         if (page != eb->pages[0])
431                 return 0;
432         found_start = btrfs_header_bytenr(eb);
433         if (found_start != start) {
434                 WARN_ON(1);
435                 return 0;
436         }
437         if (!PageUptodate(page)) {
438                 WARN_ON(1);
439                 return 0;
440         }
441         csum_tree_block(root, eb, 0);
442         return 0;
443 }
444
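/*
 * Make sure the fsid stored in a tree block matches the fsid of this
 * filesystem or one of its seed devices.  Returns 0 on a match, 1 if the
 * block belongs to some other filesystem.
 */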
445 static int check_tree_block_fsid(struct btrfs_root *root,
446                                  struct extent_buffer *eb)
447 {
448         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
449         u8 fsid[BTRFS_UUID_SIZE];
450         int ret = 1;
451
452         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
453                            BTRFS_FSID_SIZE);
454         while (fs_devices) {
455                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
456                         ret = 0;
457                         break;
458                 }
459                 fs_devices = fs_devices->seed;
460         }
461         return ret;
462 }
463
464 #define CORRUPT(reason, eb, root, slot)                         \
465         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
466                "root=%llu, slot=%d\n", reason,                  \
467                (unsigned long long)btrfs_header_bytenr(eb),     \
468                (unsigned long long)root->objectid, slot)
469
470 static noinline int check_leaf(struct btrfs_root *root,
471                                struct extent_buffer *leaf)
472 {
473         struct btrfs_key key;
474         struct btrfs_key leaf_key;
475         u32 nritems = btrfs_header_nritems(leaf);
476         int slot;
477
478         if (nritems == 0)
479                 return 0;
480
481         /* Check the 0 item */
482         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
483             BTRFS_LEAF_DATA_SIZE(root)) {
484                 CORRUPT("invalid item offset size pair", leaf, root, 0);
485                 return -EIO;
486         }
487
488         /*
489          * Check to make sure each item's keys are in the correct order and their
490          * offsets make sense.  We only have to loop through nritems-1 because
491          * we check the current slot against the next slot, which verifies the
492          * next slot's offset+size makes sense and that the current slot's
493          * offset is correct.
494          */
495         for (slot = 0; slot < nritems - 1; slot++) {
496                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
497                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
498
499                 /* Make sure the keys are in the right order */
500                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
501                         CORRUPT("bad key order", leaf, root, slot);
502                         return -EIO;
503                 }
504
505                 /*
506                  * Make sure the offset and ends are right, remember that the
507                  * item data starts at the end of the leaf and grows towards the
508                  * front.
509                  */
510                 if (btrfs_item_offset_nr(leaf, slot) !=
511                         btrfs_item_end_nr(leaf, slot + 1)) {
512                         CORRUPT("slot offset bad", leaf, root, slot);
513                         return -EIO;
514                 }
515
516                 /*
517                  * Check to make sure that we don't point outside of the leaf,
518                  * just in case all the items are consistent with each other, but
519                  * all point outside of the leaf.
520                  */
521                 if (btrfs_item_end_nr(leaf, slot) >
522                     BTRFS_LEAF_DATA_SIZE(root)) {
523                         CORRUPT("slot end outside of leaf", leaf, root, slot);
524                         return -EIO;
525                 }
526         }
527
528         return 0;
529 }
530
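/*
 * Walk backwards from the start of @page, up to @max_walk bytes, looking for
 * an extent buffer that covers the page.  Returns the buffer with a
 * reference held, or NULL if no covering buffer is found.
 */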
531 struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
532                                        struct page *page, int max_walk)
533 {
534         struct extent_buffer *eb;
535         u64 start = page_offset(page);
536         u64 target = start;
537         u64 min_start;
538
539         if (start < max_walk)
540                 min_start = 0;
541         else
542                 min_start = start - max_walk;
543
544         while (start >= min_start) {
545                 eb = find_extent_buffer(tree, start, 0);
546                 if (eb) {
547                         /*
548                          * we found an extent buffer and it contains our page,
549                          * hooray!
550                          */
551                         if (eb->start <= target &&
552                             eb->start + eb->len > target)
553                                 return eb;
554
555                         /* we found an extent buffer that wasn't for us */
556                         free_extent_buffer(eb);
557                         return NULL;
558                 }
559                 if (start == 0)
560                         break;
561                 start -= PAGE_CACHE_SIZE;
562         }
563         return NULL;
564 }
565
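/*
 * Read completion hook for btree pages: verify the block's bytenr, fsid and
 * checksum, sanity check leaves, and mark the extent buffer uptodate when
 * everything passes.
 */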
566 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
567                                struct extent_state *state, int mirror)
568 {
569         struct extent_io_tree *tree;
570         u64 found_start;
571         int found_level;
572         struct extent_buffer *eb;
573         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
574         int ret = 0;
575         int reads_done;
576
577         if (!page->private)
578                 goto out;
579
580         tree = &BTRFS_I(page->mapping->host)->io_tree;
581         eb = (struct extent_buffer *)page->private;
582
583         /* the pending IO might have been the only thing that kept this buffer
584          * in memory.  Make sure we hold a ref for all the other checks
585          */
586         extent_buffer_get(eb);
587
588         reads_done = atomic_dec_and_test(&eb->io_pages);
589         if (!reads_done)
590                 goto err;
591
592         eb->read_mirror = mirror;
593         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
594                 ret = -EIO;
595                 goto err;
596         }
597
598         found_start = btrfs_header_bytenr(eb);
599         if (found_start != eb->start) {
600                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
601                                "%llu %llu\n",
602                                (unsigned long long)found_start,
603                                (unsigned long long)eb->start);
604                 ret = -EIO;
605                 goto err;
606         }
607         if (check_tree_block_fsid(root, eb)) {
608                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
609                                (unsigned long long)eb->start);
610                 ret = -EIO;
611                 goto err;
612         }
613         found_level = btrfs_header_level(eb);
614
615         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
616                                        eb, found_level);
617
618         ret = csum_tree_block(root, eb, 1);
619         if (ret) {
620                 ret = -EIO;
621                 goto err;
622         }
623
624         /*
625          * If this is a leaf block and it is corrupt, set the corrupt bit so
626          * that we don't try and read the other copies of this block, just
627          * return -EIO.
628          */
629         if (found_level == 0 && check_leaf(root, eb)) {
630                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
631                 ret = -EIO;
632         }
633
634         if (!ret)
635                 set_extent_buffer_uptodate(eb);
636 err:
637         if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
638                 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
639                 btree_readahead_hook(root, eb, eb->start, ret);
640         }
641
642         if (ret)
643                 clear_extent_buffer_uptodate(eb);
644         free_extent_buffer(eb);
645 out:
646         return ret;
647 }
648
649 static int btree_io_failed_hook(struct page *page, int failed_mirror)
650 {
651         struct extent_buffer *eb;
652         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
653
654         eb = (struct extent_buffer *)page->private;
655         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
656         eb->read_mirror = failed_mirror;
657         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
658                 btree_readahead_hook(root, eb, eb->start, -EIO);
659         return -EIO;    /* we fixed nothing */
660 }
661
662 static void end_workqueue_bio(struct bio *bio, int err)
663 {
664         struct end_io_wq *end_io_wq = bio->bi_private;
665         struct btrfs_fs_info *fs_info;
666
667         fs_info = end_io_wq->info;
668         end_io_wq->error = err;
669         end_io_wq->work.func = end_workqueue_fn;
670         end_io_wq->work.flags = 0;
671
672         if (bio->bi_rw & REQ_WRITE) {
673                 if (end_io_wq->metadata == 1)
674                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
675                                            &end_io_wq->work);
676                 else if (end_io_wq->metadata == 2)
677                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
678                                            &end_io_wq->work);
679                 else
680                         btrfs_queue_worker(&fs_info->endio_write_workers,
681                                            &end_io_wq->work);
682         } else {
683                 if (end_io_wq->metadata)
684                         btrfs_queue_worker(&fs_info->endio_meta_workers,
685                                            &end_io_wq->work);
686                 else
687                         btrfs_queue_worker(&fs_info->endio_workers,
688                                            &end_io_wq->work);
689         }
690 }
691
692 /*
693  * For the metadata arg you want
694  *
695  * 0 - if data
696  * 1 - if normal metadata
697  * 2 - if writing to the free space cache area
698  */
699 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
700                         int metadata)
701 {
702         struct end_io_wq *end_io_wq;
703         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
704         if (!end_io_wq)
705                 return -ENOMEM;
706
707         end_io_wq->private = bio->bi_private;
708         end_io_wq->end_io = bio->bi_end_io;
709         end_io_wq->info = info;
710         end_io_wq->error = 0;
711         end_io_wq->bio = bio;
712         end_io_wq->metadata = metadata;
713
714         bio->bi_private = end_io_wq;
715         bio->bi_end_io = end_workqueue_bio;
716         return 0;
717 }
718
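/*
 * Upper bound on the number of async submit bios allowed in flight, scaled
 * by the number of worker threads and open devices.
 */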
719 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
720 {
721         unsigned long limit = min_t(unsigned long,
722                                     info->workers.max_workers,
723                                     info->fs_devices->open_devices);
724         return 256 * limit;
725 }
726
727 static void run_one_async_start(struct btrfs_work *work)
728 {
729         struct async_submit_bio *async;
730         int ret;
731
732         async = container_of(work, struct  async_submit_bio, work);
733         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
734                                       async->mirror_num, async->bio_flags,
735                                       async->bio_offset);
736         if (ret)
737                 async->error = ret;
738 }
739
740 static void run_one_async_done(struct btrfs_work *work)
741 {
742         struct btrfs_fs_info *fs_info;
743         struct async_submit_bio *async;
744         int limit;
745
746         async = container_of(work, struct  async_submit_bio, work);
747         fs_info = BTRFS_I(async->inode)->root->fs_info;
748
749         limit = btrfs_async_submit_limit(fs_info);
750         limit = limit * 2 / 3;
751
752         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
753             waitqueue_active(&fs_info->async_submit_wait))
754                 wake_up(&fs_info->async_submit_wait);
755
756         /* If an error occurred we just want to clean up the bio and move on */
757         if (async->error) {
758                 bio_endio(async->bio, async->error);
759                 return;
760         }
761
762         async->submit_bio_done(async->inode, async->rw, async->bio,
763                                async->mirror_num, async->bio_flags,
764                                async->bio_offset);
765 }
766
767 static void run_one_async_free(struct btrfs_work *work)
768 {
769         struct async_submit_bio *async;
770
771         async = container_of(work, struct  async_submit_bio, work);
772         kfree(async);
773 }
774
775 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
776                         int rw, struct bio *bio, int mirror_num,
777                         unsigned long bio_flags,
778                         u64 bio_offset,
779                         extent_submit_bio_hook_t *submit_bio_start,
780                         extent_submit_bio_hook_t *submit_bio_done)
781 {
782         struct async_submit_bio *async;
783
784         async = kmalloc(sizeof(*async), GFP_NOFS);
785         if (!async)
786                 return -ENOMEM;
787
788         async->inode = inode;
789         async->rw = rw;
790         async->bio = bio;
791         async->mirror_num = mirror_num;
792         async->submit_bio_start = submit_bio_start;
793         async->submit_bio_done = submit_bio_done;
794
795         async->work.func = run_one_async_start;
796         async->work.ordered_func = run_one_async_done;
797         async->work.ordered_free = run_one_async_free;
798
799         async->work.flags = 0;
800         async->bio_flags = bio_flags;
801         async->bio_offset = bio_offset;
802
803         async->error = 0;
804
805         atomic_inc(&fs_info->nr_async_submits);
806
807         if (rw & REQ_SYNC)
808                 btrfs_set_work_high_prio(&async->work);
809
810         btrfs_queue_worker(&fs_info->workers, &async->work);
811
812         while (atomic_read(&fs_info->async_submit_draining) &&
813               atomic_read(&fs_info->nr_async_submits)) {
814                 wait_event(fs_info->async_submit_wait,
815                            (atomic_read(&fs_info->nr_async_submits) == 0));
816         }
817
818         return 0;
819 }
820
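/*
 * Compute and store the checksum of every dirty tree block page attached to
 * this bio before it goes down to the device.
 */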
821 static int btree_csum_one_bio(struct bio *bio)
822 {
823         struct bio_vec *bvec = bio->bi_io_vec;
824         int bio_index = 0;
825         struct btrfs_root *root;
826         int ret = 0;
827
828         WARN_ON(bio->bi_vcnt <= 0);
829         while (bio_index < bio->bi_vcnt) {
830                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
831                 ret = csum_dirty_buffer(root, bvec->bv_page);
832                 if (ret)
833                         break;
834                 bio_index++;
835                 bvec++;
836         }
837         return ret;
838 }
839
840 static int __btree_submit_bio_start(struct inode *inode, int rw,
841                                     struct bio *bio, int mirror_num,
842                                     unsigned long bio_flags,
843                                     u64 bio_offset)
844 {
845         /*
846          * when we're called for a write, we're already in the async
847          * submission context.  Checksum here; btrfs_map_bio happens later
848          */
849         return btree_csum_one_bio(bio);
850 }
851
852 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
853                                  int mirror_num, unsigned long bio_flags,
854                                  u64 bio_offset)
855 {
856         int ret;
857
858         /*
859          * when we're called for a write, we're already in the async
860          * submission context.  Just jump into btrfs_map_bio
861          */
862         ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
863         if (ret)
864                 bio_endio(bio, ret);
865         return ret;
866 }
867
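/*
 * Decide whether a btree write should have its checksumming offloaded to
 * the async worker threads.  Tree log blocks, and machines with hardware
 * accelerated crc32c (SSE4.2), checksum inline instead.
 */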
868 static int check_async_write(struct inode *inode, unsigned long bio_flags)
869 {
870         if (bio_flags & EXTENT_BIO_TREE_LOG)
871                 return 0;
872 #ifdef CONFIG_X86
873         if (cpu_has_xmm4_2)
874                 return 0;
875 #endif
876         return 1;
877 }
878
879 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
880                                  int mirror_num, unsigned long bio_flags,
881                                  u64 bio_offset)
882 {
883         int async = check_async_write(inode, bio_flags);
884         int ret;
885
886         if (!(rw & REQ_WRITE)) {
887                 /*
888                  * called for a read, do the setup so that checksum validation
889                  * can happen in the async kernel threads
890                  */
891                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
892                                           bio, 1);
893                 if (ret)
894                         goto out_w_error;
895                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
896                                     mirror_num, 0);
897         } else if (!async) {
898                 ret = btree_csum_one_bio(bio);
899                 if (ret)
900                         goto out_w_error;
901                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
902                                     mirror_num, 0);
903         } else {
904                 /*
905                  * kthread helpers are used to submit writes so that
906                  * checksumming can happen in parallel across all CPUs
907                  */
908                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
909                                           inode, rw, bio, mirror_num, 0,
910                                           bio_offset,
911                                           __btree_submit_bio_start,
912                                           __btree_submit_bio_done);
913         }
914
915         if (ret) {
916 out_w_error:
917                 bio_endio(bio, ret);
918         }
919         return ret;
920 }
921
922 #ifdef CONFIG_MIGRATION
923 static int btree_migratepage(struct address_space *mapping,
924                         struct page *newpage, struct page *page,
925                         enum migrate_mode mode)
926 {
927         /*
928          * we can't safely write a btree page from here
929          * because we haven't done the locking hook
930          */
931         if (PageDirty(page))
932                 return -EAGAIN;
933         /*
934          * Buffers may be managed in a filesystem specific way.
935          * We must have no buffers or drop them.
936          */
937         if (page_has_private(page) &&
938             !try_to_release_page(page, GFP_KERNEL))
939                 return -EAGAIN;
940         return migrate_page(mapping, newpage, page, mode);
941 }
942 #endif
943
944
945 static int btree_writepages(struct address_space *mapping,
946                             struct writeback_control *wbc)
947 {
948         struct extent_io_tree *tree;
949         struct btrfs_fs_info *fs_info;
950         int ret;
951
952         tree = &BTRFS_I(mapping->host)->io_tree;
953         if (wbc->sync_mode == WB_SYNC_NONE) {
954
955                 if (wbc->for_kupdate)
956                         return 0;
957
958                 fs_info = BTRFS_I(mapping->host)->root->fs_info;
959                 /* this is a bit racy, but that's ok */
960                 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
961                                              BTRFS_DIRTY_METADATA_THRESH);
962                 if (ret < 0)
963                         return 0;
964         }
965         return btree_write_cache_pages(mapping, wbc);
966 }
967
968 static int btree_readpage(struct file *file, struct page *page)
969 {
970         struct extent_io_tree *tree;
971         tree = &BTRFS_I(page->mapping->host)->io_tree;
972         return extent_read_full_page(tree, page, btree_get_extent, 0);
973 }
974
975 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
976 {
977         if (PageWriteback(page) || PageDirty(page))
978                 return 0;
979         /*
980          * We need to mask out e.g. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
981          * slab allocation from alloc_extent_state down the callchain where
982          * it'd hit a BUG_ON as those flags are not allowed.
983          */
984         gfp_flags &= ~GFP_SLAB_BUG_MASK;
985
986         return try_release_extent_buffer(page, gfp_flags);
987 }
988
989 static void btree_invalidatepage(struct page *page, unsigned long offset)
990 {
991         struct extent_io_tree *tree;
992         tree = &BTRFS_I(page->mapping->host)->io_tree;
993         extent_invalidatepage(tree, page, offset);
994         btree_releasepage(page, GFP_NOFS);
995         if (PagePrivate(page)) {
996                 printk(KERN_WARNING "btrfs warning page private not zero "
997                        "on page %llu\n", (unsigned long long)page_offset(page));
998                 ClearPagePrivate(page);
999                 set_page_private(page, 0);
1000                 page_cache_release(page);
1001         }
1002 }
1003
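/*
 * Mark a btree page dirty.  With DEBUG set, also verify that the backing
 * extent buffer exists, holds a reference, is marked dirty and is tree
 * locked.
 */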
1004 static int btree_set_page_dirty(struct page *page)
1005 {
1006 #ifdef DEBUG
1007         struct extent_buffer *eb;
1008
1009         BUG_ON(!PagePrivate(page));
1010         eb = (struct extent_buffer *)page->private;
1011         BUG_ON(!eb);
1012         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
1013         BUG_ON(!atomic_read(&eb->refs));
1014         btrfs_assert_tree_locked(eb);
1015 #endif
1016         return __set_page_dirty_nobuffers(page);
1017 }
1018
1019 static const struct address_space_operations btree_aops = {
1020         .readpage       = btree_readpage,
1021         .writepages     = btree_writepages,
1022         .releasepage    = btree_releasepage,
1023         .invalidatepage = btree_invalidatepage,
1024 #ifdef CONFIG_MIGRATION
1025         .migratepage    = btree_migratepage,
1026 #endif
1027         .set_page_dirty = btree_set_page_dirty,
1028 };
1029
1030 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1031                          u64 parent_transid)
1032 {
1033         struct extent_buffer *buf = NULL;
1034         struct inode *btree_inode = root->fs_info->btree_inode;
1035         int ret = 0;
1036
1037         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1038         if (!buf)
1039                 return 0;
1040         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1041                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1042         free_extent_buffer(buf);
1043         return ret;
1044 }
1045
1046 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1047                          int mirror_num, struct extent_buffer **eb)
1048 {
1049         struct extent_buffer *buf = NULL;
1050         struct inode *btree_inode = root->fs_info->btree_inode;
1051         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1052         int ret;
1053
1054         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1055         if (!buf)
1056                 return 0;
1057
1058         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1059
1060         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1061                                        btree_get_extent, mirror_num);
1062         if (ret) {
1063                 free_extent_buffer(buf);
1064                 return ret;
1065         }
1066
1067         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1068                 free_extent_buffer(buf);
1069                 return -EIO;
1070         } else if (extent_buffer_uptodate(buf)) {
1071                 *eb = buf;
1072         } else {
1073                 free_extent_buffer(buf);
1074         }
1075         return 0;
1076 }
1077
1078 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1079                                             u64 bytenr, u32 blocksize)
1080 {
1081         struct inode *btree_inode = root->fs_info->btree_inode;
1082         struct extent_buffer *eb;
1083         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1084                                 bytenr, blocksize);
1085         return eb;
1086 }
1087
1088 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1089                                                  u64 bytenr, u32 blocksize)
1090 {
1091         struct inode *btree_inode = root->fs_info->btree_inode;
1092         struct extent_buffer *eb;
1093
1094         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1095                                  bytenr, blocksize);
1096         return eb;
1097 }
1098
1099
1100 int btrfs_write_tree_block(struct extent_buffer *buf)
1101 {
1102         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1103                                         buf->start + buf->len - 1);
1104 }
1105
1106 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1107 {
1108         return filemap_fdatawait_range(buf->pages[0]->mapping,
1109                                        buf->start, buf->start + buf->len - 1);
1110 }
1111
1112 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1113                                       u32 blocksize, u64 parent_transid)
1114 {
1115         struct extent_buffer *buf = NULL;
1116         int ret;
1117
1118         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1119         if (!buf)
1120                 return NULL;
1121
1122         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1123         return buf;
1124
1125 }
1126
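/*
 * If @buf was dirtied in the currently running transaction, clear its dirty
 * bit and subtract its size from the dirty metadata counter so the block is
 * not written out.
 */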
1127 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1128                       struct extent_buffer *buf)
1129 {
1130         struct btrfs_fs_info *fs_info = root->fs_info;
1131
1132         if (btrfs_header_generation(buf) ==
1133             fs_info->running_transaction->transid) {
1134                 btrfs_assert_tree_locked(buf);
1135
1136                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1137                         __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1138                                              -buf->len,
1139                                              fs_info->dirty_metadata_batch);
1140                         /* ugh, clear_extent_buffer_dirty needs to lock the page */
1141                         btrfs_set_lock_blocking(buf);
1142                         clear_extent_buffer_dirty(buf);
1143                 }
1144         }
1145 }
1146
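/*
 * Initialize the in-memory fields of a btrfs_root: sizes, locks, lists,
 * wait queues and counters, before the root is tied to an on-disk tree.
 */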
1147 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1148                          u32 stripesize, struct btrfs_root *root,
1149                          struct btrfs_fs_info *fs_info,
1150                          u64 objectid)
1151 {
1152         root->node = NULL;
1153         root->commit_root = NULL;
1154         root->sectorsize = sectorsize;
1155         root->nodesize = nodesize;
1156         root->leafsize = leafsize;
1157         root->stripesize = stripesize;
1158         root->ref_cows = 0;
1159         root->track_dirty = 0;
1160         root->in_radix = 0;
1161         root->orphan_item_inserted = 0;
1162         root->orphan_cleanup_state = 0;
1163
1164         root->objectid = objectid;
1165         root->last_trans = 0;
1166         root->highest_objectid = 0;
1167         root->name = NULL;
1168         root->inode_tree = RB_ROOT;
1169         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1170         root->block_rsv = NULL;
1171         root->orphan_block_rsv = NULL;
1172
1173         INIT_LIST_HEAD(&root->dirty_list);
1174         INIT_LIST_HEAD(&root->root_list);
1175         INIT_LIST_HEAD(&root->logged_list[0]);
1176         INIT_LIST_HEAD(&root->logged_list[1]);
1177         spin_lock_init(&root->orphan_lock);
1178         spin_lock_init(&root->inode_lock);
1179         spin_lock_init(&root->accounting_lock);
1180         spin_lock_init(&root->log_extents_lock[0]);
1181         spin_lock_init(&root->log_extents_lock[1]);
1182         mutex_init(&root->objectid_mutex);
1183         mutex_init(&root->log_mutex);
1184         init_waitqueue_head(&root->log_writer_wait);
1185         init_waitqueue_head(&root->log_commit_wait[0]);
1186         init_waitqueue_head(&root->log_commit_wait[1]);
1187         atomic_set(&root->log_commit[0], 0);
1188         atomic_set(&root->log_commit[1], 0);
1189         atomic_set(&root->log_writers, 0);
1190         atomic_set(&root->log_batch, 0);
1191         atomic_set(&root->orphan_inodes, 0);
1192         root->log_transid = 0;
1193         root->last_log_commit = 0;
1194         extent_io_tree_init(&root->dirty_log_pages,
1195                              fs_info->btree_inode->i_mapping);
1196
1197         memset(&root->root_key, 0, sizeof(root->root_key));
1198         memset(&root->root_item, 0, sizeof(root->root_item));
1199         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1200         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1201         root->defrag_trans_start = fs_info->generation;
1202         init_completion(&root->kobj_unregister);
1203         root->defrag_running = 0;
1204         root->root_key.objectid = objectid;
1205         root->anon_dev = 0;
1206
1207         spin_lock_init(&root->root_item_lock);
1208 }
1209
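/*
 * Find the last root item for @objectid in the root tree and read the
 * root's tree block into memory.  Returns -ENOENT if no root item exists,
 * -EIO if the tree block can't be read.
 */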
1210 static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1211                                             struct btrfs_fs_info *fs_info,
1212                                             u64 objectid,
1213                                             struct btrfs_root *root)
1214 {
1215         int ret;
1216         u32 blocksize;
1217         u64 generation;
1218
1219         __setup_root(tree_root->nodesize, tree_root->leafsize,
1220                      tree_root->sectorsize, tree_root->stripesize,
1221                      root, fs_info, objectid);
1222         ret = btrfs_find_last_root(tree_root, objectid,
1223                                    &root->root_item, &root->root_key);
1224         if (ret > 0)
1225                 return -ENOENT;
1226         else if (ret < 0)
1227                 return ret;
1228
1229         generation = btrfs_root_generation(&root->root_item);
1230         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1231         root->commit_root = NULL;
1232         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1233                                      blocksize, generation);
1234         if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1235                 free_extent_buffer(root->node);
1236                 root->node = NULL;
1237                 return -EIO;
1238         }
1239         root->commit_root = btrfs_root_node(root);
1240         return 0;
1241 }
1242
1243 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1244 {
1245         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1246         if (root)
1247                 root->fs_info = fs_info;
1248         return root;
1249 }
1250
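/*
 * Create a new, empty tree: allocate a leaf to act as its root and insert
 * the corresponding root item into the tree root.
 */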
1251 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1252                                      struct btrfs_fs_info *fs_info,
1253                                      u64 objectid)
1254 {
1255         struct extent_buffer *leaf;
1256         struct btrfs_root *tree_root = fs_info->tree_root;
1257         struct btrfs_root *root;
1258         struct btrfs_key key;
1259         int ret = 0;
1260         u64 bytenr;
1261
1262         root = btrfs_alloc_root(fs_info);
1263         if (!root)
1264                 return ERR_PTR(-ENOMEM);
1265
1266         __setup_root(tree_root->nodesize, tree_root->leafsize,
1267                      tree_root->sectorsize, tree_root->stripesize,
1268                      root, fs_info, objectid);
1269         root->root_key.objectid = objectid;
1270         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1271         root->root_key.offset = 0;
1272
1273         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1274                                       0, objectid, NULL, 0, 0, 0);
1275         if (IS_ERR(leaf)) {
1276                 ret = PTR_ERR(leaf);
1277                 goto fail;
1278         }
1279
1280         bytenr = leaf->start;
1281         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1282         btrfs_set_header_bytenr(leaf, leaf->start);
1283         btrfs_set_header_generation(leaf, trans->transid);
1284         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1285         btrfs_set_header_owner(leaf, objectid);
1286         root->node = leaf;
1287
1288         write_extent_buffer(leaf, fs_info->fsid,
1289                             (unsigned long)btrfs_header_fsid(leaf),
1290                             BTRFS_FSID_SIZE);
1291         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1292                             (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1293                             BTRFS_UUID_SIZE);
1294         btrfs_mark_buffer_dirty(leaf);
1295
1296         root->commit_root = btrfs_root_node(root);
1297         root->track_dirty = 1;
1298
1299
1300         root->root_item.flags = 0;
1301         root->root_item.byte_limit = 0;
1302         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1303         btrfs_set_root_generation(&root->root_item, trans->transid);
1304         btrfs_set_root_level(&root->root_item, 0);
1305         btrfs_set_root_refs(&root->root_item, 1);
1306         btrfs_set_root_used(&root->root_item, leaf->len);
1307         btrfs_set_root_last_snapshot(&root->root_item, 0);
1308         btrfs_set_root_dirid(&root->root_item, 0);
1309         root->root_item.drop_level = 0;
1310
1311         key.objectid = objectid;
1312         key.type = BTRFS_ROOT_ITEM_KEY;
1313         key.offset = 0;
1314         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1315         if (ret)
1316                 goto fail;
1317
1318         btrfs_tree_unlock(leaf);
1319
1320 fail:
1321         if (ret)
1322                 return ERR_PTR(ret);
1323
1324         return root;
1325 }
1326
1327 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1328                                          struct btrfs_fs_info *fs_info)
1329 {
1330         struct btrfs_root *root;
1331         struct btrfs_root *tree_root = fs_info->tree_root;
1332         struct extent_buffer *leaf;
1333
1334         root = btrfs_alloc_root(fs_info);
1335         if (!root)
1336                 return ERR_PTR(-ENOMEM);
1337
1338         __setup_root(tree_root->nodesize, tree_root->leafsize,
1339                      tree_root->sectorsize, tree_root->stripesize,
1340                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1341
1342         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1343         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1344         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1345         /*
1346          * log trees do not get reference counted because they go away
1347          * before a real commit is actually done.  They do store pointers
1348          * to file data extents, and those reference counts still get
1349          * updated (along with back refs to the log tree).
1350          */
1351         root->ref_cows = 0;
1352
1353         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1354                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1355                                       0, 0, 0);
1356         if (IS_ERR(leaf)) {
1357                 kfree(root);
1358                 return ERR_CAST(leaf);
1359         }
1360
1361         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1362         btrfs_set_header_bytenr(leaf, leaf->start);
1363         btrfs_set_header_generation(leaf, trans->transid);
1364         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1365         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1366         root->node = leaf;
1367
1368         write_extent_buffer(root->node, root->fs_info->fsid,
1369                             (unsigned long)btrfs_header_fsid(root->node),
1370                             BTRFS_FSID_SIZE);
1371         btrfs_mark_buffer_dirty(root->node);
1372         btrfs_tree_unlock(root->node);
1373         return root;
1374 }
1375
1376 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1377                              struct btrfs_fs_info *fs_info)
1378 {
1379         struct btrfs_root *log_root;
1380
1381         log_root = alloc_log_tree(trans, fs_info);
1382         if (IS_ERR(log_root))
1383                 return PTR_ERR(log_root);
1384         WARN_ON(fs_info->log_root_tree);
1385         fs_info->log_root_tree = log_root;
1386         return 0;
1387 }
1388
1389 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1390                        struct btrfs_root *root)
1391 {
1392         struct btrfs_root *log_root;
1393         struct btrfs_inode_item *inode_item;
1394
1395         log_root = alloc_log_tree(trans, root->fs_info);
1396         if (IS_ERR(log_root))
1397                 return PTR_ERR(log_root);
1398
1399         log_root->last_trans = trans->transid;
1400         log_root->root_key.offset = root->root_key.objectid;
1401
1402         inode_item = &log_root->root_item.inode;
1403         inode_item->generation = cpu_to_le64(1);
1404         inode_item->size = cpu_to_le64(3);
1405         inode_item->nlink = cpu_to_le32(1);
1406         inode_item->nbytes = cpu_to_le64(root->leafsize);
1407         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1408
1409         btrfs_set_root_node(&log_root->root_item, log_root->node);
1410
1411         WARN_ON(root->log_root);
1412         root->log_root = log_root;
1413         root->log_transid = 0;
1414         root->last_log_commit = 0;
1415         return 0;
1416 }
1417
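/*
 * Read a root from disk given its key in the root tree, without going
 * through the fs_roots radix tree cache.
 */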
1418 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1419                                                struct btrfs_key *location)
1420 {
1421         struct btrfs_root *root;
1422         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1423         struct btrfs_path *path;
1424         struct extent_buffer *l;
1425         u64 generation;
1426         u32 blocksize;
1427         int ret = 0;
1428         int slot;
1429
1430         root = btrfs_alloc_root(fs_info);
1431         if (!root)
1432                 return ERR_PTR(-ENOMEM);
1433         if (location->offset == (u64)-1) {
1434                 ret = find_and_setup_root(tree_root, fs_info,
1435                                           location->objectid, root);
1436                 if (ret) {
1437                         kfree(root);
1438                         return ERR_PTR(ret);
1439                 }
1440                 goto out;
1441         }
1442
1443         __setup_root(tree_root->nodesize, tree_root->leafsize,
1444                      tree_root->sectorsize, tree_root->stripesize,
1445                      root, fs_info, location->objectid);
1446
1447         path = btrfs_alloc_path();
1448         if (!path) {
1449                 kfree(root);
1450                 return ERR_PTR(-ENOMEM);
1451         }
1452         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1453         if (ret == 0) {
1454                 l = path->nodes[0];
1455                 slot = path->slots[0];
1456                 btrfs_read_root_item(tree_root, l, slot, &root->root_item);
1457                 memcpy(&root->root_key, location, sizeof(*location));
1458         }
1459         btrfs_free_path(path);
1460         if (ret) {
1461                 kfree(root);
1462                 if (ret > 0)
1463                         ret = -ENOENT;
1464                 return ERR_PTR(ret);
1465         }
1466
1467         generation = btrfs_root_generation(&root->root_item);
1468         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1469         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1470                                      blocksize, generation);
1471         root->commit_root = btrfs_root_node(root);
1472         BUG_ON(!root->node); /* -ENOMEM */
1473 out:
1474         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1475                 root->ref_cows = 1;
1476                 btrfs_check_and_init_root_item(&root->root_item);
1477         }
1478
1479         return root;
1480 }
1481
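/*
 * Look a root up by key.  The well known roots are handed back straight
 * from fs_info; everything else is served from the fs_roots_radix cache
 * and only read from disk (and then inserted into the radix tree) on a
 * cache miss.  A typical lookup, as done for the default subvolume in
 * open_ctree() below:
 *
 *	key.objectid = BTRFS_FS_TREE_OBJECTID;
 *	key.type = BTRFS_ROOT_ITEM_KEY;
 *	key.offset = (u64)-1;
 *	root = btrfs_read_fs_root_no_name(fs_info, &key);
 */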
1482 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1483                                               struct btrfs_key *location)
1484 {
1485         struct btrfs_root *root;
1486         int ret;
1487
1488         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1489                 return fs_info->tree_root;
1490         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1491                 return fs_info->extent_root;
1492         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1493                 return fs_info->chunk_root;
1494         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1495                 return fs_info->dev_root;
1496         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1497                 return fs_info->csum_root;
1498         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1499                 return fs_info->quota_root ? fs_info->quota_root :
1500                                              ERR_PTR(-ENOENT);
1501 again:
1502         spin_lock(&fs_info->fs_roots_radix_lock);
1503         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1504                                  (unsigned long)location->objectid);
1505         spin_unlock(&fs_info->fs_roots_radix_lock);
1506         if (root)
1507                 return root;
1508
1509         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1510         if (IS_ERR(root))
1511                 return root;
1512
1513         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1514         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1515                                         GFP_NOFS);
1516         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1517                 ret = -ENOMEM;
1518                 goto fail;
1519         }
1520
1521         btrfs_init_free_ino_ctl(root);
1522         mutex_init(&root->fs_commit_mutex);
1523         spin_lock_init(&root->cache_lock);
1524         init_waitqueue_head(&root->cache_wait);
1525
1526         ret = get_anon_bdev(&root->anon_dev);
1527         if (ret)
1528                 goto fail;
1529
1530         if (btrfs_root_refs(&root->root_item) == 0) {
1531                 ret = -ENOENT;
1532                 goto fail;
1533         }
1534
1535         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1536         if (ret < 0)
1537                 goto fail;
1538         if (ret == 0)
1539                 root->orphan_item_inserted = 1;
1540
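        /*
         * preload the radix tree so the insertion below cannot fail on
         * allocation while we hold the fs_roots_radix_lock spinlock
         */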
1541         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1542         if (ret)
1543                 goto fail;
1544
1545         spin_lock(&fs_info->fs_roots_radix_lock);
1546         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1547                                 (unsigned long)root->root_key.objectid,
1548                                 root);
1549         if (ret == 0)
1550                 root->in_radix = 1;
1551
1552         spin_unlock(&fs_info->fs_roots_radix_lock);
1553         radix_tree_preload_end();
1554         if (ret) {
1555                 if (ret == -EEXIST) {
1556                         free_fs_root(root);
1557                         goto again;
1558                 }
1559                 goto fail;
1560         }
1561
1562         ret = btrfs_find_dead_roots(fs_info->tree_root,
1563                                     root->root_key.objectid);
1564         WARN_ON(ret);
1565         return root;
1566 fail:
1567         free_fs_root(root);
1568         return ERR_PTR(ret);
1569 }
1570
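/*
 * bdi congestion callback: the filesystem counts as congested as soon
 * as any of its member devices' queues is congested for the requested
 * bdi_bits.
 */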
1571 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1572 {
1573         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1574         int ret = 0;
1575         struct btrfs_device *device;
1576         struct backing_dev_info *bdi;
1577
1578         rcu_read_lock();
1579         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1580                 if (!device->bdev)
1581                         continue;
1582                 bdi = blk_get_backing_dev_info(device->bdev);
1583                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1584                         ret = 1;
1585                         break;
1586                 }
1587         }
1588         rcu_read_unlock();
1589         return ret;
1590 }
1591
1592 /*
1593  * If this fails, caller must call bdi_destroy() to get rid of the
1594  * bdi again.
1595  */
1596 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1597 {
1598         int err;
1599
1600         bdi->capabilities = BDI_CAP_MAP_COPY;
1601         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1602         if (err)
1603                 return err;
1604
1605         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1606         bdi->congested_fn       = btrfs_congested_fn;
1607         bdi->congested_data     = info;
1608         return 0;
1609 }
1610
1611 /*
1612  * called by the kthread helper functions to finally call the bio end_io
1613  * functions.  This is where read checksum verification actually happens
1614  */
1615 static void end_workqueue_fn(struct btrfs_work *work)
1616 {
1617         struct bio *bio;
1618         struct end_io_wq *end_io_wq;
1619         struct btrfs_fs_info *fs_info;
1620         int error;
1621
1622         end_io_wq = container_of(work, struct end_io_wq, work);
1623         bio = end_io_wq->bio;
1624         fs_info = end_io_wq->info;
1625
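        /*
         * restore the bi_private/bi_end_io that were stashed away at
         * submit time, free our wrapper and hand the completion back to
         * the original owner of the bio
         */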
1626         error = end_io_wq->error;
1627         bio->bi_private = end_io_wq->private;
1628         bio->bi_end_io = end_io_wq->end_io;
1629         kfree(end_io_wq);
1630         bio_endio(bio, error);
1631 }
1632
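/*
 * background thread that periodically runs the delayed iputs, deletes
 * old snapshots and kicks off inode defrag, but only while the fs is
 * writable and the cleaner_mutex can be taken without blocking.
 */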
1633 static int cleaner_kthread(void *arg)
1634 {
1635         struct btrfs_root *root = arg;
1636
1637         do {
1638                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1639                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1640                         btrfs_run_delayed_iputs(root);
1641                         btrfs_clean_old_snapshots(root);
1642                         mutex_unlock(&root->fs_info->cleaner_mutex);
1643                         btrfs_run_defrag_inodes(root->fs_info);
1644                 }
1645
1646                 if (!try_to_freeze()) {
1647                         set_current_state(TASK_INTERRUPTIBLE);
1648                         if (!kthread_should_stop())
1649                                 schedule();
1650                         __set_current_state(TASK_RUNNING);
1651                 }
1652         } while (!kthread_should_stop());
1653         return 0;
1654 }
1655
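/*
 * background thread that commits the running transaction once it is
 * about 30 seconds old (sooner if the transaction is blocked), then
 * sleeps until it is time to check again.
 */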
1656 static int transaction_kthread(void *arg)
1657 {
1658         struct btrfs_root *root = arg;
1659         struct btrfs_trans_handle *trans;
1660         struct btrfs_transaction *cur;
1661         u64 transid;
1662         unsigned long now;
1663         unsigned long delay;
1664         bool cannot_commit;
1665
1666         do {
1667                 cannot_commit = false;
1668                 delay = HZ * 30;
1669                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1670
1671                 spin_lock(&root->fs_info->trans_lock);
1672                 cur = root->fs_info->running_transaction;
1673                 if (!cur) {
1674                         spin_unlock(&root->fs_info->trans_lock);
1675                         goto sleep;
1676                 }
1677
1678                 now = get_seconds();
1679                 if (!cur->blocked &&
1680                     (now < cur->start_time || now - cur->start_time < 30)) {
1681                         spin_unlock(&root->fs_info->trans_lock);
1682                         delay = HZ * 5;
1683                         goto sleep;
1684                 }
1685                 transid = cur->transid;
1686                 spin_unlock(&root->fs_info->trans_lock);
1687
1688                 /* If the file system is aborted, this will always fail. */
1689                 trans = btrfs_attach_transaction(root);
1690                 if (IS_ERR(trans)) {
1691                         if (PTR_ERR(trans) != -ENOENT)
1692                                 cannot_commit = true;
1693                         goto sleep;
1694                 }
1695                 if (transid == trans->transid) {
1696                         btrfs_commit_transaction(trans, root);
1697                 } else {
1698                         btrfs_end_transaction(trans, root);
1699                 }
1700 sleep:
1701                 wake_up_process(root->fs_info->cleaner_kthread);
1702                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1703
1704                 if (!try_to_freeze()) {
1705                         set_current_state(TASK_INTERRUPTIBLE);
1706                         if (!kthread_should_stop() &&
1707                             (!btrfs_transaction_blocked(root->fs_info) ||
1708                              cannot_commit))
1709                                 schedule_timeout(delay);
1710                         __set_current_state(TASK_RUNNING);
1711                 }
1712         } while (!kthread_should_stop());
1713         return 0;
1714 }
1715
1716 /*
1717  * this will find the highest generation in the array of
1718  * root backups.  The index of the newest entry is returned,
1719  * or -1 if we can't find anything.
1720  *
1721  * We check to make sure the array is valid by comparing the
1722  * generation of the latest root in the array with the generation
1723  * in the super block.  If they don't match we pitch it.
1724  */
1725 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1726 {
1727         u64 cur;
1728         int newest_index = -1;
1729         struct btrfs_root_backup *root_backup;
1730         int i;
1731
1732         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1733                 root_backup = info->super_copy->super_roots + i;
1734                 cur = btrfs_backup_tree_root_gen(root_backup);
1735                 if (cur == newest_gen)
1736                         newest_index = i;
1737         }
1738
1739         /* check to see if we actually wrapped around */
1740         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1741                 root_backup = info->super_copy->super_roots;
1742                 cur = btrfs_backup_tree_root_gen(root_backup);
1743                 if (cur == newest_gen)
1744                         newest_index = 0;
1745         }
1746         return newest_index;
1747 }
1748
1749
1750 /*
1751  * find the oldest backup so we know where to store new entries
1752  * in the backup array.  This will set the backup_root_index
1753  * field in the fs_info struct
1754  */
1755 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1756                                      u64 newest_gen)
1757 {
1758         int newest_index = -1;
1759
1760         newest_index = find_newest_super_backup(info, newest_gen);
1761         /* if there was garbage in there, just move along */
1762         if (newest_index == -1) {
1763                 info->backup_root_index = 0;
1764         } else {
1765                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1766         }
1767 }
1768
1769 /*
1770  * copy all the root pointers into the super backup array.
1771  * this will bump the backup pointer by one when it is
1772  * done
1773  */
1774 static void backup_super_roots(struct btrfs_fs_info *info)
1775 {
1776         int next_backup;
1777         struct btrfs_root_backup *root_backup;
1778         int last_backup;
1779
1780         next_backup = info->backup_root_index;
1781         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1782                 BTRFS_NUM_BACKUP_ROOTS;
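        /*
         * e.g. with BTRFS_NUM_BACKUP_ROOTS == 4 and backup_root_index == 0,
         * last_backup is (0 + 4 - 1) % 4 == 3, the slot written by the
         * previous commit; adding NUM - 1 rather than subtracting 1 keeps
         * the left operand of the modulo positive
         */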
1783
1784         /*
1785          * just overwrite the last backup if we're at the same generation;
1786          * this happens only at umount
1787          */
1788         root_backup = info->super_for_commit->super_roots + last_backup;
1789         if (btrfs_backup_tree_root_gen(root_backup) ==
1790             btrfs_header_generation(info->tree_root->node))
1791                 next_backup = last_backup;
1792
1793         root_backup = info->super_for_commit->super_roots + next_backup;
1794
1795         /*
1796          * make sure all of our padding and empty slots get zero filled
1797          * regardless of which ones we use today
1798          */
1799         memset(root_backup, 0, sizeof(*root_backup));
1800
1801         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1802
1803         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1804         btrfs_set_backup_tree_root_gen(root_backup,
1805                                btrfs_header_generation(info->tree_root->node));
1806
1807         btrfs_set_backup_tree_root_level(root_backup,
1808                                btrfs_header_level(info->tree_root->node));
1809
1810         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1811         btrfs_set_backup_chunk_root_gen(root_backup,
1812                                btrfs_header_generation(info->chunk_root->node));
1813         btrfs_set_backup_chunk_root_level(root_backup,
1814                                btrfs_header_level(info->chunk_root->node));
1815
1816         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1817         btrfs_set_backup_extent_root_gen(root_backup,
1818                                btrfs_header_generation(info->extent_root->node));
1819         btrfs_set_backup_extent_root_level(root_backup,
1820                                btrfs_header_level(info->extent_root->node));
1821
1822         /*
1823          * we might commit during log recovery, which happens before we set
1824          * the fs_root.  Make sure it is valid before we fill it in.
1825          */
1826         if (info->fs_root && info->fs_root->node) {
1827                 btrfs_set_backup_fs_root(root_backup,
1828                                          info->fs_root->node->start);
1829                 btrfs_set_backup_fs_root_gen(root_backup,
1830                                btrfs_header_generation(info->fs_root->node));
1831                 btrfs_set_backup_fs_root_level(root_backup,
1832                                btrfs_header_level(info->fs_root->node));
1833         }
1834
1835         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1836         btrfs_set_backup_dev_root_gen(root_backup,
1837                                btrfs_header_generation(info->dev_root->node));
1838         btrfs_set_backup_dev_root_level(root_backup,
1839                                        btrfs_header_level(info->dev_root->node));
1840
1841         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1842         btrfs_set_backup_csum_root_gen(root_backup,
1843                                btrfs_header_generation(info->csum_root->node));
1844         btrfs_set_backup_csum_root_level(root_backup,
1845                                btrfs_header_level(info->csum_root->node));
1846
1847         btrfs_set_backup_total_bytes(root_backup,
1848                              btrfs_super_total_bytes(info->super_copy));
1849         btrfs_set_backup_bytes_used(root_backup,
1850                              btrfs_super_bytes_used(info->super_copy));
1851         btrfs_set_backup_num_devices(root_backup,
1852                              btrfs_super_num_devices(info->super_copy));
1853
1854         /*
1855          * if we don't copy this out to the super_copy, it won't get remembered
1856          * for the next commit
1857          */
1858         memcpy(&info->super_copy->super_roots,
1859                &info->super_for_commit->super_roots,
1860                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1861 }
1862
1863 /*
1864  * this copies info out of the root backup array and back into
1865  * the in-memory super block.  It is meant to help iterate through
1866  * the array, so you send it the number of backups you've already
1867  * tried and the last backup index you used.
1868  *
1869  * this returns -1 when it has tried all the backups
1870  */
1871 static noinline int next_root_backup(struct btrfs_fs_info *info,
1872                                      struct btrfs_super_block *super,
1873                                      int *num_backups_tried, int *backup_index)
1874 {
1875         struct btrfs_root_backup *root_backup;
1876         int newest = *backup_index;
1877
1878         if (*num_backups_tried == 0) {
1879                 u64 gen = btrfs_super_generation(super);
1880
1881                 newest = find_newest_super_backup(info, gen);
1882                 if (newest == -1)
1883                         return -1;
1884
1885                 *backup_index = newest;
1886                 *num_backups_tried = 1;
1887         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1888                 /* we've tried all the backups, all done */
1889                 return -1;
1890         } else {
1891                 /* jump to the next oldest backup */
1892                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1893                         BTRFS_NUM_BACKUP_ROOTS;
1894                 *backup_index = newest;
1895                 *num_backups_tried += 1;
1896         }
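        /*
         * each call steps one slot further back through the ring, so the
         * backups are tried newest first until all BTRFS_NUM_BACKUP_ROOTS
         * slots have been consumed
         */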
1897         root_backup = super->super_roots + newest;
1898
1899         btrfs_set_super_generation(super,
1900                                    btrfs_backup_tree_root_gen(root_backup));
1901         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1902         btrfs_set_super_root_level(super,
1903                                    btrfs_backup_tree_root_level(root_backup));
1904         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1905
1906         /*
1907          * FIXME: the total bytes and num_devices need to match, or we
1908          * should require an fsck
1909          */
1910         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1911         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1912         return 0;
1913 }
1914
1915 /* helper to cleanup tree roots */
1916 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1917 {
1918         free_extent_buffer(info->tree_root->node);
1919         free_extent_buffer(info->tree_root->commit_root);
1920         free_extent_buffer(info->dev_root->node);
1921         free_extent_buffer(info->dev_root->commit_root);
1922         free_extent_buffer(info->extent_root->node);
1923         free_extent_buffer(info->extent_root->commit_root);
1924         free_extent_buffer(info->csum_root->node);
1925         free_extent_buffer(info->csum_root->commit_root);
1926         if (info->quota_root) {
1927                 free_extent_buffer(info->quota_root->node);
1928                 free_extent_buffer(info->quota_root->commit_root);
1929         }
1930
1931         info->tree_root->node = NULL;
1932         info->tree_root->commit_root = NULL;
1933         info->dev_root->node = NULL;
1934         info->dev_root->commit_root = NULL;
1935         info->extent_root->node = NULL;
1936         info->extent_root->commit_root = NULL;
1937         info->csum_root->node = NULL;
1938         info->csum_root->commit_root = NULL;
1939         if (info->quota_root) {
1940                 info->quota_root->node = NULL;
1941                 info->quota_root->commit_root = NULL;
1942         }
1943
1944         if (chunk_root) {
1945                 free_extent_buffer(info->chunk_root->node);
1946                 free_extent_buffer(info->chunk_root->commit_root);
1947                 info->chunk_root->node = NULL;
1948                 info->chunk_root->commit_root = NULL;
1949         }
1950 }
1951
1952
1953 int open_ctree(struct super_block *sb,
1954                struct btrfs_fs_devices *fs_devices,
1955                char *options)
1956 {
1957         u32 sectorsize;
1958         u32 nodesize;
1959         u32 leafsize;
1960         u32 blocksize;
1961         u32 stripesize;
1962         u64 generation;
1963         u64 features;
1964         struct btrfs_key location;
1965         struct buffer_head *bh;
1966         struct btrfs_super_block *disk_super;
1967         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1968         struct btrfs_root *tree_root;
1969         struct btrfs_root *extent_root;
1970         struct btrfs_root *csum_root;
1971         struct btrfs_root *chunk_root;
1972         struct btrfs_root *dev_root;
1973         struct btrfs_root *quota_root;
1974         struct btrfs_root *log_tree_root;
1975         int ret;
1976         int err = -EINVAL;
1977         int num_backups_tried = 0;
1978         int backup_index = 0;
1979
1980         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
1981         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
1982         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
1983         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
1984         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
1985         quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
1986
1987         if (!tree_root || !extent_root || !csum_root ||
1988             !chunk_root || !dev_root || !quota_root) {
1989                 err = -ENOMEM;
1990                 goto fail;
1991         }
1992
1993         ret = init_srcu_struct(&fs_info->subvol_srcu);
1994         if (ret) {
1995                 err = ret;
1996                 goto fail;
1997         }
1998
1999         ret = setup_bdi(fs_info, &fs_info->bdi);
2000         if (ret) {
2001                 err = ret;
2002                 goto fail_srcu;
2003         }
2004
2005         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2006         if (ret) {
2007                 err = ret;
2008                 goto fail_bdi;
2009         }
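        /*
         * the batch is how far one cpu's local count may drift before it
         * is folded into the shared counter; scaling it with
         * log2(nr_cpus) trades a little read accuracy for less cacheline
         * bouncing on large machines
         */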
2010         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2011                                         (1 + ilog2(nr_cpu_ids));
2012
2013         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2014         if (ret) {
2015                 err = ret;
2016                 goto fail_dirty_metadata_bytes;
2017         }
2018
2019         fs_info->btree_inode = new_inode(sb);
2020         if (!fs_info->btree_inode) {
2021                 err = -ENOMEM;
2022                 goto fail_delalloc_bytes;
2023         }
2024
2025         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2026
2027         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2028         INIT_LIST_HEAD(&fs_info->trans_list);
2029         INIT_LIST_HEAD(&fs_info->dead_roots);
2030         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2031         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2032         INIT_LIST_HEAD(&fs_info->ordered_operations);
2033         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2034         spin_lock_init(&fs_info->delalloc_lock);
2035         spin_lock_init(&fs_info->trans_lock);
2036         spin_lock_init(&fs_info->fs_roots_radix_lock);
2037         spin_lock_init(&fs_info->delayed_iput_lock);
2038         spin_lock_init(&fs_info->defrag_inodes_lock);
2039         spin_lock_init(&fs_info->free_chunk_lock);
2040         spin_lock_init(&fs_info->tree_mod_seq_lock);
2041         rwlock_init(&fs_info->tree_mod_log_lock);
2042         mutex_init(&fs_info->reloc_mutex);
2043
2044         init_completion(&fs_info->kobj_unregister);
2045         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2046         INIT_LIST_HEAD(&fs_info->space_info);
2047         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2048         btrfs_mapping_init(&fs_info->mapping_tree);
2049         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2050                              BTRFS_BLOCK_RSV_GLOBAL);
2051         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2052                              BTRFS_BLOCK_RSV_DELALLOC);
2053         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2054         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2055         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2056         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2057                              BTRFS_BLOCK_RSV_DELOPS);
2058         atomic_set(&fs_info->nr_async_submits, 0);
2059         atomic_set(&fs_info->async_delalloc_pages, 0);
2060         atomic_set(&fs_info->async_submit_draining, 0);
2061         atomic_set(&fs_info->nr_async_bios, 0);
2062         atomic_set(&fs_info->defrag_running, 0);
2063         atomic_set(&fs_info->tree_mod_seq, 0);
2064         fs_info->sb = sb;
2065         fs_info->max_inline = 8192 * 1024;
2066         fs_info->metadata_ratio = 0;
2067         fs_info->defrag_inodes = RB_ROOT;
2068         fs_info->trans_no_join = 0;
2069         fs_info->free_chunk_space = 0;
2070         fs_info->tree_mod_log = RB_ROOT;
2071
2072         /* readahead state */
2073         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2074         spin_lock_init(&fs_info->reada_lock);
2075
2076         fs_info->thread_pool_size = min_t(unsigned long,
2077                                           num_online_cpus() + 2, 8);
2078
2079         INIT_LIST_HEAD(&fs_info->ordered_extents);
2080         spin_lock_init(&fs_info->ordered_extent_lock);
2081         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2082                                         GFP_NOFS);
2083         if (!fs_info->delayed_root) {
2084                 err = -ENOMEM;
2085                 goto fail_iput;
2086         }
2087         btrfs_init_delayed_root(fs_info->delayed_root);
2088
2089         mutex_init(&fs_info->scrub_lock);
2090         atomic_set(&fs_info->scrubs_running, 0);
2091         atomic_set(&fs_info->scrub_pause_req, 0);
2092         atomic_set(&fs_info->scrubs_paused, 0);
2093         atomic_set(&fs_info->scrub_cancel_req, 0);
2094         init_waitqueue_head(&fs_info->scrub_pause_wait);
2095         init_rwsem(&fs_info->scrub_super_lock);
2096         fs_info->scrub_workers_refcnt = 0;
2097 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2098         fs_info->check_integrity_print_mask = 0;
2099 #endif
2100
2101         spin_lock_init(&fs_info->balance_lock);
2102         mutex_init(&fs_info->balance_mutex);
2103         atomic_set(&fs_info->balance_running, 0);
2104         atomic_set(&fs_info->balance_pause_req, 0);
2105         atomic_set(&fs_info->balance_cancel_req, 0);
2106         fs_info->balance_ctl = NULL;
2107         init_waitqueue_head(&fs_info->balance_wait_q);
2108
2109         sb->s_blocksize = 4096;
2110         sb->s_blocksize_bits = blksize_bits(4096);
2111         sb->s_bdi = &fs_info->bdi;
2112
2113         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2114         set_nlink(fs_info->btree_inode, 1);
2115         /*
2116          * we set the i_size on the btree inode to the largest possible offset.
2117          * the real end of the address space is determined by all of
2118          * the devices in the system
2119          */
2120         fs_info->btree_inode->i_size = OFFSET_MAX;
2121         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2122         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2123
2124         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2125         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2126                              fs_info->btree_inode->i_mapping);
2127         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2128         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2129
2130         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2131
2132         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2133         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2134                sizeof(struct btrfs_key));
2135         set_bit(BTRFS_INODE_DUMMY,
2136                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2137         insert_inode_hash(fs_info->btree_inode);
2138
2139         spin_lock_init(&fs_info->block_group_cache_lock);
2140         fs_info->block_group_cache_tree = RB_ROOT;
2141         fs_info->first_logical_byte = (u64)-1;
2142
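        /*
         * two freed_extents trees, so extents pinned by the running
         * transaction can be gathered in one while the other, which
         * belongs to the committing transaction, is being drained;
         * pinned_extents points at whichever tree is currently active
         */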
2143         extent_io_tree_init(&fs_info->freed_extents[0],
2144                              fs_info->btree_inode->i_mapping);
2145         extent_io_tree_init(&fs_info->freed_extents[1],
2146                              fs_info->btree_inode->i_mapping);
2147         fs_info->pinned_extents = &fs_info->freed_extents[0];
2148         fs_info->do_barriers = 1;
2149
2150
2151         mutex_init(&fs_info->ordered_operations_mutex);
2152         mutex_init(&fs_info->tree_log_mutex);
2153         mutex_init(&fs_info->chunk_mutex);
2154         mutex_init(&fs_info->transaction_kthread_mutex);
2155         mutex_init(&fs_info->cleaner_mutex);
2156         mutex_init(&fs_info->volume_mutex);
2157         init_rwsem(&fs_info->extent_commit_sem);
2158         init_rwsem(&fs_info->cleanup_work_sem);
2159         init_rwsem(&fs_info->subvol_sem);
2160         fs_info->dev_replace.lock_owner = 0;
2161         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2162         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2163         mutex_init(&fs_info->dev_replace.lock_management_lock);
2164         mutex_init(&fs_info->dev_replace.lock);
2165
2166         spin_lock_init(&fs_info->qgroup_lock);
2167         fs_info->qgroup_tree = RB_ROOT;
2168         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2169         fs_info->qgroup_seq = 1;
2170         fs_info->quota_enabled = 0;
2171         fs_info->pending_quota_state = 0;
2172
2173         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2174         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2175
2176         init_waitqueue_head(&fs_info->transaction_throttle);
2177         init_waitqueue_head(&fs_info->transaction_wait);
2178         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2179         init_waitqueue_head(&fs_info->async_submit_wait);
2180
2181         __setup_root(4096, 4096, 4096, 4096, tree_root,
2182                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2183
2184         invalidate_bdev(fs_devices->latest_bdev);
2185         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2186         if (!bh) {
2187                 err = -EINVAL;
2188                 goto fail_alloc;
2189         }
2190
2191         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2192         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2193                sizeof(*fs_info->super_for_commit));
2194         brelse(bh);
2195
2196         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2197
2198         disk_super = fs_info->super_copy;
2199         if (!btrfs_super_root(disk_super))
2200                 goto fail_alloc;
2201
2202         /* check FS state, whether FS is broken. */
2203         fs_info->fs_state |= btrfs_super_flags(disk_super);
2204
2205         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2206         if (ret) {
2207                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2208                 err = ret;
2209                 goto fail_alloc;
2210         }
2211
2212         /*
2213          * run through our array of backup supers and setup
2214          * run through our array of backup supers and set up
2215          */
2216         generation = btrfs_super_generation(disk_super);
2217         find_oldest_super_backup(fs_info, generation);
2218
2219         /*
2220          * In the long term, we'll store the compression type in the super
2221          * block, and it'll be used for per file compression control.
2222          */
2223         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2224
2225         ret = btrfs_parse_options(tree_root, options);
2226         if (ret) {
2227                 err = ret;
2228                 goto fail_alloc;
2229         }
2230
2231         features = btrfs_super_incompat_flags(disk_super) &
2232                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2233         if (features) {
2234                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2235                        "unsupported optional features (%Lx).\n",
2236                        (unsigned long long)features);
2237                 err = -EINVAL;
2238                 goto fail_alloc;
2239         }
2240
2241         if (btrfs_super_leafsize(disk_super) !=
2242             btrfs_super_nodesize(disk_super)) {
2243                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2244                        "blocksizes don't match.  node %d leaf %d\n",
2245                        btrfs_super_nodesize(disk_super),
2246                        btrfs_super_leafsize(disk_super));
2247                 err = -EINVAL;
2248                 goto fail_alloc;
2249         }
2250         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2251                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2252                        "blocksize (%d) was too large\n",
2253                        btrfs_super_leafsize(disk_super));
2254                 err = -EINVAL;
2255                 goto fail_alloc;
2256         }
2257
2258         features = btrfs_super_incompat_flags(disk_super);
2259         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2260         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2261                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2262
2263         /*
2264          * flag our filesystem as having big metadata blocks if
2265          * they are bigger than the page size
2266          */
2267         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2268                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2269                         printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2270                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2271         }
2272
2273         nodesize = btrfs_super_nodesize(disk_super);
2274         leafsize = btrfs_super_leafsize(disk_super);
2275         sectorsize = btrfs_super_sectorsize(disk_super);
2276         stripesize = btrfs_super_stripesize(disk_super);
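        /*
         * now that the real leafsize and sectorsize are known, recompute
         * the dirty metadata batch (set up earlier with a PAGE_CACHE_SIZE
         * guess) and size the delalloc batch at 512 sectors times the
         * same cpu scaling factor
         */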
2277         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2278         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2279
2280         /*
2281          * mixed block groups end up with duplicate but slightly offset
2282          * extent buffers for the same range, which leads to corruption
2283          */
2284         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2285             (sectorsize != leafsize)) {
2286                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2287                                 "are not allowed for mixed block groups on %s\n",
2288                                 sb->s_id);
2289                 goto fail_alloc;
2290         }
2291
2292         btrfs_set_super_incompat_flags(disk_super, features);
2293
2294         features = btrfs_super_compat_ro_flags(disk_super) &
2295                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2296         if (!(sb->s_flags & MS_RDONLY) && features) {
2297                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2298                        "unsupported option features (%Lx).\n",
2299                        (unsigned long long)features);
2300                 err = -EINVAL;
2301                 goto fail_alloc;
2302         }
2303
2304         btrfs_init_workers(&fs_info->generic_worker,
2305                            "genwork", 1, NULL);
2306
2307         btrfs_init_workers(&fs_info->workers, "worker",
2308                            fs_info->thread_pool_size,
2309                            &fs_info->generic_worker);
2310
2311         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2312                            fs_info->thread_pool_size,
2313                            &fs_info->generic_worker);
2314
2315         btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2316                            fs_info->thread_pool_size,
2317                            &fs_info->generic_worker);
2318
2319         btrfs_init_workers(&fs_info->submit_workers, "submit",
2320                            min_t(u64, fs_devices->num_devices,
2321                            fs_info->thread_pool_size),
2322                            &fs_info->generic_worker);
2323
2324         btrfs_init_workers(&fs_info->caching_workers, "cache",
2325                            2, &fs_info->generic_worker);
2326
2327         /* a higher idle thresh on the submit workers makes it much more
2328          * likely that bios will be sent down in a sane order to the
2329          * devices
2330          */
2331         fs_info->submit_workers.idle_thresh = 64;
2332
2333         fs_info->workers.idle_thresh = 16;
2334         fs_info->workers.ordered = 1;
2335
2336         fs_info->delalloc_workers.idle_thresh = 2;
2337         fs_info->delalloc_workers.ordered = 1;
2338
2339         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2340                            &fs_info->generic_worker);
2341         btrfs_init_workers(&fs_info->endio_workers, "endio",
2342                            fs_info->thread_pool_size,
2343                            &fs_info->generic_worker);
2344         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2345                            fs_info->thread_pool_size,
2346                            &fs_info->generic_worker);
2347         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2348                            "endio-meta-write", fs_info->thread_pool_size,
2349                            &fs_info->generic_worker);
2350         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2351                            fs_info->thread_pool_size,
2352                            &fs_info->generic_worker);
2353         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2354                            1, &fs_info->generic_worker);
2355         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2356                            fs_info->thread_pool_size,
2357                            &fs_info->generic_worker);
2358         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2359                            fs_info->thread_pool_size,
2360                            &fs_info->generic_worker);
2361
2362         /*
2363          * endios are largely parallel and should have a very
2364          * low idle thresh
2365          */
2366         fs_info->endio_workers.idle_thresh = 4;
2367         fs_info->endio_meta_workers.idle_thresh = 4;
2368
2369         fs_info->endio_write_workers.idle_thresh = 2;
2370         fs_info->endio_meta_write_workers.idle_thresh = 2;
2371         fs_info->readahead_workers.idle_thresh = 2;
2372
2373         /*
2374          * btrfs_start_workers can really only fail because of ENOMEM so just
2375          * return -ENOMEM if any of these fail.
2376          */
2377         ret = btrfs_start_workers(&fs_info->workers);
2378         ret |= btrfs_start_workers(&fs_info->generic_worker);
2379         ret |= btrfs_start_workers(&fs_info->submit_workers);
2380         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2381         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2382         ret |= btrfs_start_workers(&fs_info->endio_workers);
2383         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2384         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2385         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2386         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2387         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2388         ret |= btrfs_start_workers(&fs_info->caching_workers);
2389         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2390         ret |= btrfs_start_workers(&fs_info->flush_workers);
2391         if (ret) {
2392                 err = -ENOMEM;
2393                 goto fail_sb_buffer;
2394         }
2395
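        /* scale readahead with the number of devices, but never below 4MiB */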
2396         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2397         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2398                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2399
2400         tree_root->nodesize = nodesize;
2401         tree_root->leafsize = leafsize;
2402         tree_root->sectorsize = sectorsize;
2403         tree_root->stripesize = stripesize;
2404
2405         sb->s_blocksize = sectorsize;
2406         sb->s_blocksize_bits = blksize_bits(sectorsize);
2407
2408         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
2409                     sizeof(disk_super->magic))) {
2410                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2411                 goto fail_sb_buffer;
2412         }
2413
2414         if (sectorsize != PAGE_SIZE) {
2415                 printk(KERN_WARNING "btrfs: Incompatible sector size (%lu) "
2416                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2417                 goto fail_sb_buffer;
2418         }
2419
2420         mutex_lock(&fs_info->chunk_mutex);
2421         ret = btrfs_read_sys_array(tree_root);
2422         mutex_unlock(&fs_info->chunk_mutex);
2423         if (ret) {
2424                 printk(KERN_WARNING "btrfs: failed to read the system "
2425                        "array on %s\n", sb->s_id);
2426                 goto fail_sb_buffer;
2427         }
2428
2429         blocksize = btrfs_level_size(tree_root,
2430                                      btrfs_super_chunk_root_level(disk_super));
2431         generation = btrfs_super_chunk_root_generation(disk_super);
2432
2433         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2434                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2435
2436         chunk_root->node = read_tree_block(chunk_root,
2437                                            btrfs_super_chunk_root(disk_super),
2438                                            blocksize, generation);
2439         BUG_ON(!chunk_root->node); /* -ENOMEM */
2440         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2441                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2442                        sb->s_id);
2443                 goto fail_tree_roots;
2444         }
2445         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2446         chunk_root->commit_root = btrfs_root_node(chunk_root);
2447
2448         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2449            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2450            BTRFS_UUID_SIZE);
2451
2452         ret = btrfs_read_chunk_tree(chunk_root);
2453         if (ret) {
2454                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2455                        sb->s_id);
2456                 goto fail_tree_roots;
2457         }
2458
2459         /*
2460          * keep the device that is marked to be the target device for the
2461          * dev_replace procedure
2462          */
2463         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2464
2465         if (!fs_devices->latest_bdev) {
2466                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2467                        sb->s_id);
2468                 goto fail_tree_roots;
2469         }
2470
2471 retry_root_backup:
2472         blocksize = btrfs_level_size(tree_root,
2473                                      btrfs_super_root_level(disk_super));
2474         generation = btrfs_super_generation(disk_super);
2475
2476         tree_root->node = read_tree_block(tree_root,
2477                                           btrfs_super_root(disk_super),
2478                                           blocksize, generation);
2479         if (!tree_root->node ||
2480             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2481                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2482                        sb->s_id);
2483
2484                 goto recovery_tree_root;
2485         }
2486
2487         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2488         tree_root->commit_root = btrfs_root_node(tree_root);
2489
2490         ret = find_and_setup_root(tree_root, fs_info,
2491                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2492         if (ret)
2493                 goto recovery_tree_root;
2494         extent_root->track_dirty = 1;
2495
2496         ret = find_and_setup_root(tree_root, fs_info,
2497                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2498         if (ret)
2499                 goto recovery_tree_root;
2500         dev_root->track_dirty = 1;
2501
2502         ret = find_and_setup_root(tree_root, fs_info,
2503                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2504         if (ret)
2505                 goto recovery_tree_root;
2506         csum_root->track_dirty = 1;
2507
2508         ret = find_and_setup_root(tree_root, fs_info,
2509                                   BTRFS_QUOTA_TREE_OBJECTID, quota_root);
2510         if (ret) {
2511                 kfree(quota_root);
2512                 quota_root = fs_info->quota_root = NULL;
2513         } else {
2514                 quota_root->track_dirty = 1;
2515                 fs_info->quota_enabled = 1;
2516                 fs_info->pending_quota_state = 1;
2517         }
2518
2519         fs_info->generation = generation;
2520         fs_info->last_trans_committed = generation;
2521
2522         ret = btrfs_recover_balance(fs_info);
2523         if (ret) {
2524                 printk(KERN_WARNING "btrfs: failed to recover balance\n");
2525                 goto fail_block_groups;
2526         }
2527
2528         ret = btrfs_init_dev_stats(fs_info);
2529         if (ret) {
2530                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2531                        ret);
2532                 goto fail_block_groups;
2533         }
2534
2535         ret = btrfs_init_dev_replace(fs_info);
2536         if (ret) {
2537                 pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2538                 goto fail_block_groups;
2539         }
2540
2541         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2542
2543         ret = btrfs_init_space_info(fs_info);
2544         if (ret) {
2545                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2546                 goto fail_block_groups;
2547         }
2548
2549         ret = btrfs_read_block_groups(extent_root);
2550         if (ret) {
2551                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2552                 goto fail_block_groups;
2553         }
2554         fs_info->num_tolerated_disk_barrier_failures =
2555                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2556         if (fs_info->fs_devices->missing_devices >
2557              fs_info->num_tolerated_disk_barrier_failures &&
2558             !(sb->s_flags & MS_RDONLY)) {
2559                 printk(KERN_WARNING
2560                        "Btrfs: too many missing devices, writeable mount is not allowed\n");
2561                 goto fail_block_groups;
2562         }
2563
2564         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2565                                                "btrfs-cleaner");
2566         if (IS_ERR(fs_info->cleaner_kthread))
2567                 goto fail_block_groups;
2568
2569         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2570                                                    tree_root,
2571                                                    "btrfs-transaction");
2572         if (IS_ERR(fs_info->transaction_kthread))
2573                 goto fail_cleaner;
2574
2575         if (!btrfs_test_opt(tree_root, SSD) &&
2576             !btrfs_test_opt(tree_root, NOSSD) &&
2577             !fs_info->fs_devices->rotating) {
2578                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2579                        "mode\n");
2580                 btrfs_set_opt(fs_info->mount_opt, SSD);
2581         }
2582
2583 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2584         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2585                 ret = btrfsic_mount(tree_root, fs_devices,
2586                                     btrfs_test_opt(tree_root,
2587                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2588                                     1 : 0,
2589                                     fs_info->check_integrity_print_mask);
2590                 if (ret)
2591                         printk(KERN_WARNING "btrfs: failed to initialize"
2592                                " integrity check module %s\n", sb->s_id);
2593         }
2594 #endif
2595         ret = btrfs_read_qgroup_config(fs_info);
2596         if (ret)
2597                 goto fail_trans_kthread;
2598
2599         /* do not make disk changes in a broken FS */
2600         if (btrfs_super_log_root(disk_super) != 0) {
2601                 u64 bytenr = btrfs_super_log_root(disk_super);
2602
2603                 if (fs_devices->rw_devices == 0) {
2604                         printk(KERN_WARNING "Btrfs log replay required "
2605                                "on RO media\n");
2606                         err = -EIO;
2607                         goto fail_qgroup;
2608                 }
2609                 blocksize =
2610                      btrfs_level_size(tree_root,
2611                                       btrfs_super_log_root_level(disk_super));
2612
2613                 log_tree_root = btrfs_alloc_root(fs_info);
2614                 if (!log_tree_root) {
2615                         err = -ENOMEM;
2616                         goto fail_qgroup;
2617                 }
2618
2619                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2620                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2621
2622                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2623                                                       blocksize,
2624                                                       generation + 1);
2625                 /* returns with log_tree_root freed on success */
2626                 ret = btrfs_recover_log_trees(log_tree_root);
2627                 if (ret) {
2628                         btrfs_error(tree_root->fs_info, ret,
2629                                     "Failed to recover log tree");
2630                         free_extent_buffer(log_tree_root->node);
2631                         kfree(log_tree_root);
2632                         goto fail_trans_kthread;
2633                 }
2634
2635                 if (sb->s_flags & MS_RDONLY) {
2636                         ret = btrfs_commit_super(tree_root);
2637                         if (ret)
2638                                 goto fail_trans_kthread;
2639                 }
2640         }
2641
2642         ret = btrfs_find_orphan_roots(tree_root);
2643         if (ret)
2644                 goto fail_trans_kthread;
2645
2646         if (!(sb->s_flags & MS_RDONLY)) {
2647                 ret = btrfs_cleanup_fs_roots(fs_info);
2648                 if (ret)
2649                         goto fail_trans_kthread;
2650
2651                 ret = btrfs_recover_relocation(tree_root);
2652                 if (ret < 0) {
2653                         printk(KERN_WARNING
2654                                "btrfs: failed to recover relocation\n");
2655                         err = -EINVAL;
2656                         goto fail_qgroup;
2657                 }
2658         }
2659
2660         location.objectid = BTRFS_FS_TREE_OBJECTID;
2661         location.type = BTRFS_ROOT_ITEM_KEY;
2662         location.offset = (u64)-1;
2663
2664         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2665         if (!fs_info->fs_root)
2666                 goto fail_qgroup;
2667         if (IS_ERR(fs_info->fs_root)) {
2668                 err = PTR_ERR(fs_info->fs_root);
2669                 goto fail_qgroup;
2670         }
2671
2672         if (sb->s_flags & MS_RDONLY)
2673                 return 0;
2674
2675         down_read(&fs_info->cleanup_work_sem);
2676         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2677             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2678                 up_read(&fs_info->cleanup_work_sem);
2679                 close_ctree(tree_root);
2680                 return ret;
2681         }
2682         up_read(&fs_info->cleanup_work_sem);
2683
2684         ret = btrfs_resume_balance_async(fs_info);
2685         if (ret) {
2686                 printk(KERN_WARNING "btrfs: failed to resume balance\n");
2687                 close_ctree(tree_root);
2688                 return ret;
2689         }
2690
2691         ret = btrfs_resume_dev_replace_async(fs_info);
2692         if (ret) {
2693                 pr_warn("btrfs: failed to resume dev_replace\n");
2694                 close_ctree(tree_root);
2695                 return ret;
2696         }
2697
2698         return 0;
2699
2700 fail_qgroup:
2701         btrfs_free_qgroup_config(fs_info);
2702 fail_trans_kthread:
2703         kthread_stop(fs_info->transaction_kthread);
2704 fail_cleaner:
2705         kthread_stop(fs_info->cleaner_kthread);
2706
2707         /*
2708          * make sure we're done with the btree inode before we stop our
2709          * kthreads
2710          */
2711         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2712         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2713
2714 fail_block_groups:
2715         btrfs_free_block_groups(fs_info);
2716
2717 fail_tree_roots:
2718         free_root_pointers(fs_info, 1);
2719
2720 fail_sb_buffer:
2721         btrfs_stop_workers(&fs_info->generic_worker);
2722         btrfs_stop_workers(&fs_info->readahead_workers);
2723         btrfs_stop_workers(&fs_info->fixup_workers);
2724         btrfs_stop_workers(&fs_info->delalloc_workers);
2725         btrfs_stop_workers(&fs_info->workers);
2726         btrfs_stop_workers(&fs_info->endio_workers);
2727         btrfs_stop_workers(&fs_info->endio_meta_workers);
2728         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2729         btrfs_stop_workers(&fs_info->endio_write_workers);
2730         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2731         btrfs_stop_workers(&fs_info->submit_workers);
2732         btrfs_stop_workers(&fs_info->delayed_workers);
2733         btrfs_stop_workers(&fs_info->caching_workers);
2734         btrfs_stop_workers(&fs_info->flush_workers);
2735 fail_alloc:
2736 fail_iput:
2737         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2738
2739         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2740         iput(fs_info->btree_inode);
2741 fail_delalloc_bytes:
2742         percpu_counter_destroy(&fs_info->delalloc_bytes);
2743 fail_dirty_metadata_bytes:
2744         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2745 fail_bdi:
2746         bdi_destroy(&fs_info->bdi);
2747 fail_srcu:
2748         cleanup_srcu_struct(&fs_info->subvol_srcu);
2749 fail:
2750         btrfs_close_devices(fs_info->fs_devices);
2751         return err;
2752
2753 recovery_tree_root:
2754         if (!btrfs_test_opt(tree_root, RECOVERY))
2755                 goto fail_tree_roots;
2756
2757         free_root_pointers(fs_info, 0);
2758
2759         /* don't use the log in recovery mode, it won't be valid */
2760         btrfs_set_super_log_root(disk_super, 0);
2761
2762         /* we can't trust the free space cache either */
2763         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2764
2765         ret = next_root_backup(fs_info, fs_info->super_copy,
2766                                &num_backups_tried, &backup_index);
2767         if (ret == -1)
2768                 goto fail_block_groups;
2769         goto retry_root_backup;
2770 }
2771
2772 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2773 {
2774         if (uptodate) {
2775                 set_buffer_uptodate(bh);
2776         } else {
2777                 struct btrfs_device *device = (struct btrfs_device *)
2778                         bh->b_private;
2779
2780                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2781                                           "I/O error on %s\n",
2782                                           rcu_str_deref(device->name));
2783                 /* note, we don't set_buffer_write_io_error because we have
2784                  * our own ways of dealing with the IO errors
2785                  */
2786                 clear_buffer_uptodate(bh);
2787                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2788         }
2789         unlock_buffer(bh);
2790         put_bh(bh);
2791 }
2792
2793 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2794 {
2795         struct buffer_head *bh;
2796         struct buffer_head *latest = NULL;
2797         struct btrfs_super_block *super;
2798         int i;
2799         u64 transid = 0;
2800         u64 bytenr;
2801
2802         /* we would like to check all the supers, but that would make
2803          * a btrfs mount succeed after a mkfs from a different FS.
2804          * So, we need to add a special mount option to scan for
2805          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2806          */
2807         for (i = 0; i < 1; i++) {
2808                 bytenr = btrfs_sb_offset(i);
2809                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2810                         break;
2811                 bh = __bread(bdev, bytenr / 4096, 4096);
2812                 if (!bh)
2813                         continue;
2814
2815                 super = (struct btrfs_super_block *)bh->b_data;
2816                 if (btrfs_super_bytenr(super) != bytenr ||
2817                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2818                             sizeof(super->magic))) {
2819                         brelse(bh);
2820                         continue;
2821                 }
2822
2823                 if (!latest || btrfs_super_generation(super) > transid) {
2824                         brelse(latest);
2825                         latest = bh;
2826                         transid = btrfs_super_generation(super);
2827                 } else {
2828                         brelse(bh);
2829                 }
2830         }
2831         return latest;
2832 }
2833
2834 /*
2835  * this should be called twice, once with wait == 0 and
2836  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2837  * we write are pinned.
2838  *
2839  * They are released when wait == 1 is done.
2840  * max_mirrors must be the same for both runs, and it indicates how
2841  * many supers on this one device should be written.
2842  *
2843  * max_mirrors == 0 means to write them all.
2844  */
2845 static int write_dev_supers(struct btrfs_device *device,
2846                             struct btrfs_super_block *sb,
2847                             int do_barriers, int wait, int max_mirrors)
2848 {
2849         struct buffer_head *bh;
2850         int i;
2851         int ret;
2852         int errors = 0;
2853         u32 crc;
2854         u64 bytenr;
2855
2856         if (max_mirrors == 0)
2857                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2858
2859         for (i = 0; i < max_mirrors; i++) {
2860                 bytenr = btrfs_sb_offset(i);
2861                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2862                         break;
2863
2864                 if (wait) {
2865                         bh = __find_get_block(device->bdev, bytenr / 4096,
2866                                               BTRFS_SUPER_INFO_SIZE);
2867                         BUG_ON(!bh);
2868                         wait_on_buffer(bh);
2869                         if (!buffer_uptodate(bh))
2870                                 errors++;
2871
2872                         /* drop our reference */
2873                         brelse(bh);
2874
2875                         /* drop the reference from the wait == 0 run */
2876                         brelse(bh);
2877                         continue;
2878                 } else {
2879                         btrfs_set_super_bytenr(sb, bytenr);
2880
2881                         crc = ~(u32)0;
2882                         crc = btrfs_csum_data(NULL, (char *)sb +
2883                                               BTRFS_CSUM_SIZE, crc,
2884                                               BTRFS_SUPER_INFO_SIZE -
2885                                               BTRFS_CSUM_SIZE);
2886                         btrfs_csum_final(crc, sb->csum);
2887
2888                         /*
2889                          * one reference for us, and we leave it for the
2890                          * caller
2891                          */
2892                         bh = __getblk(device->bdev, bytenr / 4096,
2893                                       BTRFS_SUPER_INFO_SIZE);
2894                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2895
2896                         /* one reference for submit_bh */
2897                         get_bh(bh);
2898
2899                         set_buffer_uptodate(bh);
2900                         lock_buffer(bh);
2901                         bh->b_end_io = btrfs_end_buffer_write_sync;
2902                         bh->b_private = device;
2903                 }
2904
2905                 /*
2906          * submit each super copy with FUA so it reaches stable
2907          * media before the write is considered complete
2908                  */
2909                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
2910                 if (ret)
2911                         errors++;
2912         }
2913         return errors < i ? 0 : -1;
2914 }
2915
2916 /*
2917  * endio for write_dev_flush; this wakes anyone waiting
2918  * for the barrier when it is done
2919  */
2920 static void btrfs_end_empty_barrier(struct bio *bio, int err)
2921 {
2922         if (err) {
2923                 if (err == -EOPNOTSUPP)
2924                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2925                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2926         }
2927         if (bio->bi_private)
2928                 complete(bio->bi_private);
2929         bio_put(bio);
2930 }
2931
2932 /*
2933  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
2934  * sent down.  With wait == 1, it waits for the previous flush.
2935  *
2936  * any device where the flush fails with eopnotsupp is flagged as not-barrier
2937  * capable
2938  */
2939 static int write_dev_flush(struct btrfs_device *device, int wait)
2940 {
2941         struct bio *bio;
2942         int ret = 0;
2943
2944         if (device->nobarriers)
2945                 return 0;
2946
2947         if (wait) {
2948                 bio = device->flush_bio;
2949                 if (!bio)
2950                         return 0;
2951
2952                 wait_for_completion(&device->flush_wait);
2953
2954                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2955                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
2956                                       rcu_str_deref(device->name));
2957                         device->nobarriers = 1;
2958                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
2959                         ret = -EIO;
2960                         btrfs_dev_stat_inc_and_print(device,
2961                                 BTRFS_DEV_STAT_FLUSH_ERRS);
2962                 }
2963
2964                 /* drop the reference from the wait == 0 run */
2965                 bio_put(bio);
2966                 device->flush_bio = NULL;
2967
2968                 return ret;
2969         }
2970
2971         /*
2972          * one reference for us, and we leave it for the
2973          * caller
2974          */
2975         device->flush_bio = NULL;
2976         bio = bio_alloc(GFP_NOFS, 0);
2977         if (!bio)
2978                 return -ENOMEM;
2979
2980         bio->bi_end_io = btrfs_end_empty_barrier;
2981         bio->bi_bdev = device->bdev;
2982         init_completion(&device->flush_wait);
2983         bio->bi_private = &device->flush_wait;
2984         device->flush_bio = bio;
2985
2986         bio_get(bio);
2987         btrfsic_submit_bio(WRITE_FLUSH, bio);
2988
2989         return 0;
2990 }
2991
2992 /*
2993  * send an empty flush down to each device in parallel,
2994  * then wait for them
2995  */
2996 static int barrier_all_devices(struct btrfs_fs_info *info)
2997 {
2998         struct list_head *head;
2999         struct btrfs_device *dev;
3000         int errors_send = 0;
3001         int errors_wait = 0;
3002         int ret;
3003
3004         /* send down all the barriers */
3005         head = &info->fs_devices->devices;
3006         list_for_each_entry_rcu(dev, head, dev_list) {
3007                 if (!dev->bdev) {
3008                         errors_send++;
3009                         continue;
3010                 }
3011                 if (!dev->in_fs_metadata || !dev->writeable)
3012                         continue;
3013
3014                 ret = write_dev_flush(dev, 0);
3015                 if (ret)
3016                         errors_send++;
3017         }
3018
3019         /* wait for all the barriers */
3020         list_for_each_entry_rcu(dev, head, dev_list) {
3021                 if (!dev->bdev) {
3022                         errors_wait++;
3023                         continue;
3024                 }
3025                 if (!dev->in_fs_metadata || !dev->writeable)
3026                         continue;
3027
3028                 ret = write_dev_flush(dev, 1);
3029                 if (ret)
3030                         errors_wait++;
3031         }
3032         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3033             errors_wait > info->num_tolerated_disk_barrier_failures)
3034                 return -EIO;
3035         return 0;
3036 }
3037
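/*
 * work out how many device barrier/flush failures the filesystem can
 * tolerate without risking metadata loss.  We start from the device count
 * and lower the answer based on the weakest redundancy profile found in
 * any allocated block group (see the per-profile comment below).
 */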
3038 int btrfs_calc_num_tolerated_disk_barrier_failures(
3039         struct btrfs_fs_info *fs_info)
3040 {
3041         struct btrfs_ioctl_space_info space;
3042         struct btrfs_space_info *sinfo;
3043         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3044                        BTRFS_BLOCK_GROUP_SYSTEM,
3045                        BTRFS_BLOCK_GROUP_METADATA,
3046                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3047         int num_types = 4;
3048         int i;
3049         int c;
3050         int num_tolerated_disk_barrier_failures =
3051                 (int)fs_info->fs_devices->num_devices;
3052
3053         for (i = 0; i < num_types; i++) {
3054                 struct btrfs_space_info *tmp;
3055
3056                 sinfo = NULL;
3057                 rcu_read_lock();
3058                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3059                         if (tmp->flags == types[i]) {
3060                                 sinfo = tmp;
3061                                 break;
3062                         }
3063                 }
3064                 rcu_read_unlock();
3065
3066                 if (!sinfo)
3067                         continue;
3068
3069                 down_read(&sinfo->groups_sem);
3070                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3071                         if (!list_empty(&sinfo->block_groups[c])) {
3072                                 u64 flags;
3073
3074                                 btrfs_get_block_group_info(
3075                                         &sinfo->block_groups[c], &space);
3076                                 if (space.total_bytes == 0 ||
3077                                     space.used_bytes == 0)
3078                                         continue;
3079                                 flags = space.flags;
3080                                 /*
3081                                  * return
3082                                  * 0: if dup, single or RAID0 is configured for
3083                                  *    any of metadata, system or data, else
3084                                  * 1: if RAID5 is configured, or if RAID1 or
3085                                  *    RAID10 is configured and only two mirrors
3086                                  *    are used, else
3087                                  * 2: if RAID6 is configured, else
3088                                  * num_mirrors - 1: if RAID1 or RAID10 is
3089                                  *                  configured and more than
3090                                  *                  2 mirrors are used.
3091                                  */
3092                                 if (num_tolerated_disk_barrier_failures > 0 &&
3093                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3094                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3095                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3096                                       == 0)))
3097                                         num_tolerated_disk_barrier_failures = 0;
3098                                 else if (num_tolerated_disk_barrier_failures > 1
3099                                          &&
3100                                          (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3101                                                    BTRFS_BLOCK_GROUP_RAID10)))
3102                                         num_tolerated_disk_barrier_failures = 1;
3103                         }
3104                 }
3105                 up_read(&sinfo->groups_sem);
3106         }
3107
3108         return num_tolerated_disk_barrier_failures;
3109 }
3110
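/*
 * write the super block to every writeable device in the filesystem.
 * Barriers are sent first unless they are disabled, then the supers are
 * submitted and waited on in two passes via write_dev_supers().  Errors
 * are only fatal once they exceed max_errors, one less than the device
 * count recorded in the super block.
 */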
3111 int write_all_supers(struct btrfs_root *root, int max_mirrors)
3112 {
3113         struct list_head *head;
3114         struct btrfs_device *dev;
3115         struct btrfs_super_block *sb;
3116         struct btrfs_dev_item *dev_item;
3117         int ret;
3118         int do_barriers;
3119         int max_errors;
3120         int total_errors = 0;
3121         u64 flags;
3122
3123         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3124         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3125         backup_super_roots(root->fs_info);
3126
3127         sb = root->fs_info->super_for_commit;
3128         dev_item = &sb->dev_item;
3129
3130         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3131         head = &root->fs_info->fs_devices->devices;
3132
3133         if (do_barriers) {
3134                 ret = barrier_all_devices(root->fs_info);
3135                 if (ret) {
3136                         mutex_unlock(
3137                                 &root->fs_info->fs_devices->device_list_mutex);
3138                         btrfs_error(root->fs_info, ret,
3139                                     "errors while submitting device barriers.");
3140                         return ret;
3141                 }
3142         }
3143
3144         list_for_each_entry_rcu(dev, head, dev_list) {
3145                 if (!dev->bdev) {
3146                         total_errors++;
3147                         continue;
3148                 }
3149                 if (!dev->in_fs_metadata || !dev->writeable)
3150                         continue;
3151
3152                 btrfs_set_stack_device_generation(dev_item, 0);
3153                 btrfs_set_stack_device_type(dev_item, dev->type);
3154                 btrfs_set_stack_device_id(dev_item, dev->devid);
3155                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3156                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3157                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3158                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3159                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3160                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3161                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3162
3163                 flags = btrfs_super_flags(sb);
3164                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3165
3166                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3167                 if (ret)
3168                         total_errors++;
3169         }
3170         if (total_errors > max_errors) {
3171                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3172                        total_errors);
3173
3174                 /* This shouldn't happen. FUA is masked off if unsupported */
3175                 BUG();
3176         }
3177
3178         total_errors = 0;
3179         list_for_each_entry_rcu(dev, head, dev_list) {
3180                 if (!dev->bdev)
3181                         continue;
3182                 if (!dev->in_fs_metadata || !dev->writeable)
3183                         continue;
3184
3185                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3186                 if (ret)
3187                         total_errors++;
3188         }
3189         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3190         if (total_errors > max_errors) {
3191                 btrfs_error(root->fs_info, -EIO,
3192                             "%d errors while writing supers", total_errors);
3193                 return -EIO;
3194         }
3195         return 0;
3196 }
3197
3198 int write_ctree_super(struct btrfs_trans_handle *trans,
3199                       struct btrfs_root *root, int max_mirrors)
3200 {
3201         int ret;
3202
3203         ret = write_all_supers(root, max_mirrors);
3204         return ret;
3205 }
3206
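/*
 * drop a subvolume root from the fs_roots radix tree, wait out any srcu
 * readers if the root has been fully deleted, and free its in-memory
 * state including the inode number caches.
 */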
3207 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3208 {
3209         spin_lock(&fs_info->fs_roots_radix_lock);
3210         radix_tree_delete(&fs_info->fs_roots_radix,
3211                           (unsigned long)root->root_key.objectid);
3212         spin_unlock(&fs_info->fs_roots_radix_lock);
3213
3214         if (btrfs_root_refs(&root->root_item) == 0)
3215                 synchronize_srcu(&fs_info->subvol_srcu);
3216
3217         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3218         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3219         free_fs_root(root);
3220 }
3221
3222 static void free_fs_root(struct btrfs_root *root)
3223 {
3224         iput(root->cache_inode);
3225         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3226         if (root->anon_dev)
3227                 free_anon_bdev(root->anon_dev);
3228         free_extent_buffer(root->node);
3229         free_extent_buffer(root->commit_root);
3230         kfree(root->free_ino_ctl);
3231         kfree(root->free_ino_pinned);
3232         kfree(root->name);
3233         kfree(root);
3234 }
3235
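/*
 * free everything on the dead_roots list and then every root still left
 * in the fs_roots radix tree; used while tearing the filesystem down.
 */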
3236 static void del_fs_roots(struct btrfs_fs_info *fs_info)
3237 {
3238         int ret;
3239         struct btrfs_root *gang[8];
3240         int i;
3241
3242         while (!list_empty(&fs_info->dead_roots)) {
3243                 gang[0] = list_entry(fs_info->dead_roots.next,
3244                                      struct btrfs_root, root_list);
3245                 list_del(&gang[0]->root_list);
3246
3247                 if (gang[0]->in_radix) {
3248                         btrfs_free_fs_root(fs_info, gang[0]);
3249                 } else {
3250                         free_extent_buffer(gang[0]->node);
3251                         free_extent_buffer(gang[0]->commit_root);
3252                         kfree(gang[0]);
3253                 }
3254         }
3255
3256         while (1) {
3257                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3258                                              (void **)gang, 0,
3259                                              ARRAY_SIZE(gang));
3260                 if (!ret)
3261                         break;
3262                 for (i = 0; i < ret; i++)
3263                         btrfs_free_fs_root(fs_info, gang[i]);
3264         }
3265 }
3266
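/*
 * walk all roots in the fs_roots radix tree and run orphan cleanup on
 * each of them, returning the first error encountered.
 */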
3267 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3268 {
3269         u64 root_objectid = 0;
3270         struct btrfs_root *gang[8];
3271         int i;
3272         int ret;
3273
3274         while (1) {
3275                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3276                                              (void **)gang, root_objectid,
3277                                              ARRAY_SIZE(gang));
3278                 if (!ret)
3279                         break;
3280
3281                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3282                 for (i = 0; i < ret; i++) {
3283                         int err;
3284
3285                         root_objectid = gang[i]->root_key.objectid;
3286                         err = btrfs_orphan_cleanup(gang[i]);
3287                         if (err)
3288                                 return err;
3289                 }
3290                 root_objectid++;
3291         }
3292         return 0;
3293 }
3294
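/*
 * flush everything out for a clean shutdown: run delayed iputs and old
 * snapshot cleanup, wait for in-flight cleanup work, commit the running
 * transaction (twice, see below) and finally write the super blocks.
 */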
3295 int btrfs_commit_super(struct btrfs_root *root)
3296 {
3297         struct btrfs_trans_handle *trans;
3298         int ret;
3299
3300         mutex_lock(&root->fs_info->cleaner_mutex);
3301         btrfs_run_delayed_iputs(root);
3302         btrfs_clean_old_snapshots(root);
3303         mutex_unlock(&root->fs_info->cleaner_mutex);
3304
3305         /* wait until ongoing cleanup work is done */
3306         down_write(&root->fs_info->cleanup_work_sem);
3307         up_write(&root->fs_info->cleanup_work_sem);
3308
3309         trans = btrfs_join_transaction(root);
3310         if (IS_ERR(trans))
3311                 return PTR_ERR(trans);
3312         ret = btrfs_commit_transaction(trans, root);
3313         if (ret)
3314                 return ret;
3315         /* run commit again to drop the original snapshot */
3316         trans = btrfs_join_transaction(root);
3317         if (IS_ERR(trans))
3318                 return PTR_ERR(trans);
3319         ret = btrfs_commit_transaction(trans, root);
3320         if (ret)
3321                 return ret;
3322         ret = btrfs_write_and_wait_transaction(NULL, root);
3323         if (ret) {
3324                 btrfs_error(root->fs_info, ret,
3325                             "Failed to sync btree inode to disk.");
3326                 return ret;
3327         }
3328
3329         ret = write_ctree_super(NULL, root, 0);
3330         return ret;
3331 }
3332
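/*
 * tear the filesystem down at unmount: pause balance and dev-replace,
 * cancel scrub, commit the last transaction on read-write mounts, stop
 * the kthreads and workers, then free the tree roots, block groups and
 * remaining per-fs state.
 */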
3333 int close_ctree(struct btrfs_root *root)
3334 {
3335         struct btrfs_fs_info *fs_info = root->fs_info;
3336         int ret;
3337
3338         fs_info->closing = 1;
3339         smp_mb();
3340
3341         /* pause restriper - we want to resume on mount */
3342         btrfs_pause_balance(fs_info);
3343
3344         btrfs_dev_replace_suspend_for_unmount(fs_info);
3345
3346         btrfs_scrub_cancel(fs_info);
3347
3348         /* wait for any defraggers to finish */
3349         wait_event(fs_info->transaction_wait,
3350                    (atomic_read(&fs_info->defrag_running) == 0));
3351
3352         /* clear out the rbtree of defraggable inodes */
3353         btrfs_cleanup_defrag_inodes(fs_info);
3354
3355         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3356                 ret = btrfs_commit_super(root);
3357                 if (ret)
3358                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3359         }
3360
3361         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
3362                 btrfs_error_commit_super(root);
3363
3364         btrfs_put_block_group_cache(fs_info);
3365
3366         kthread_stop(fs_info->transaction_kthread);
3367         kthread_stop(fs_info->cleaner_kthread);
3368
3369         fs_info->closing = 2;
3370         smp_mb();
3371
3372         btrfs_free_qgroup_config(root->fs_info);
3373
3374         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3375                 printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3376                        percpu_counter_sum(&fs_info->delalloc_bytes));
3377         }
3378
3379         free_extent_buffer(fs_info->extent_root->node);
3380         free_extent_buffer(fs_info->extent_root->commit_root);
3381         free_extent_buffer(fs_info->tree_root->node);
3382         free_extent_buffer(fs_info->tree_root->commit_root);
3383         free_extent_buffer(fs_info->chunk_root->node);
3384         free_extent_buffer(fs_info->chunk_root->commit_root);
3385         free_extent_buffer(fs_info->dev_root->node);
3386         free_extent_buffer(fs_info->dev_root->commit_root);
3387         free_extent_buffer(fs_info->csum_root->node);
3388         free_extent_buffer(fs_info->csum_root->commit_root);
3389         if (fs_info->quota_root) {
3390                 free_extent_buffer(fs_info->quota_root->node);
3391                 free_extent_buffer(fs_info->quota_root->commit_root);
3392         }
3393
3394         btrfs_free_block_groups(fs_info);
3395
3396         del_fs_roots(fs_info);
3397
3398         iput(fs_info->btree_inode);
3399
3400         btrfs_stop_workers(&fs_info->generic_worker);
3401         btrfs_stop_workers(&fs_info->fixup_workers);
3402         btrfs_stop_workers(&fs_info->delalloc_workers);
3403         btrfs_stop_workers(&fs_info->workers);
3404         btrfs_stop_workers(&fs_info->endio_workers);
3405         btrfs_stop_workers(&fs_info->endio_meta_workers);
3406         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
3407         btrfs_stop_workers(&fs_info->endio_write_workers);
3408         btrfs_stop_workers(&fs_info->endio_freespace_worker);
3409         btrfs_stop_workers(&fs_info->submit_workers);
3410         btrfs_stop_workers(&fs_info->delayed_workers);
3411         btrfs_stop_workers(&fs_info->caching_workers);
3412         btrfs_stop_workers(&fs_info->readahead_workers);
3413         btrfs_stop_workers(&fs_info->flush_workers);
3414
3415 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3416         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3417                 btrfsic_unmount(root, fs_info->fs_devices);
3418 #endif
3419
3420         btrfs_close_devices(fs_info->fs_devices);
3421         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3422
3423         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3424         percpu_counter_destroy(&fs_info->delalloc_bytes);
3425         bdi_destroy(&fs_info->bdi);
3426         cleanup_srcu_struct(&fs_info->subvol_srcu);
3427
3428         return 0;
3429 }
3430
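/*
 * returns 1 if the buffer is uptodate and its generation matches
 * parent_transid, 0 otherwise, and -EAGAIN when atomic is set and
 * verify_parent_transid() could not complete without blocking.
 */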
3431 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3432                           int atomic)
3433 {
3434         int ret;
3435         struct inode *btree_inode = buf->pages[0]->mapping->host;
3436
3437         ret = extent_buffer_uptodate(buf);
3438         if (!ret)
3439                 return ret;
3440
3441         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3442                                     parent_transid, atomic);
3443         if (ret == -EAGAIN)
3444                 return ret;
3445         return !ret;
3446 }
3447
3448 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3449 {
3450         return set_extent_buffer_uptodate(buf);
3451 }
3452
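/*
 * mark a tree block dirty.  The buffer must be tree locked and should
 * belong to the running transaction; newly dirtied bytes are added to
 * the per-cpu dirty_metadata_bytes counter.
 */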
3453 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3454 {
3455         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3456         u64 transid = btrfs_header_generation(buf);
3457         int was_dirty;
3458
3459         btrfs_assert_tree_locked(buf);
3460         if (transid != root->fs_info->generation)
3461                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3462                        "found %llu running %llu\n",
3463                         (unsigned long long)buf->start,
3464                         (unsigned long long)transid,
3465                         (unsigned long long)root->fs_info->generation);
3466         was_dirty = set_extent_buffer_dirty(buf);
3467         if (!was_dirty)
3468                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3469                                      buf->len,
3470                                      root->fs_info->dirty_metadata_batch);
3471 }
3472
3473 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3474                                         int flush_delayed)
3475 {
3476         /*
3477          * looks as though older kernels can get into trouble with
3478          * this code; they end up stuck in balance_dirty_pages forever
3479          */
3480         int ret;
3481
3482         if (current->flags & PF_MEMALLOC)
3483                 return;
3484
3485         if (flush_delayed)
3486                 btrfs_balance_delayed_items(root);
3487
3488         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3489                                      BTRFS_DIRTY_METADATA_THRESH);
3490         if (ret > 0) {
3491                 balance_dirty_pages_ratelimited_nr(
3492                                    root->fs_info->btree_inode->i_mapping, 1);
3493         }
3494         return;
3495 }
3496
3497 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3498 {
3499         __btrfs_btree_balance_dirty(root, 1);
3500 }
3501
3502 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3503 {
3504         __btrfs_btree_balance_dirty(root, 0);
3505 }
3506
3507 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3508 {
3509         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3510         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3511 }
3512
3513 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3514                               int read_only)
3515 {
3516         if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
3517                 printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
3518                 return -EINVAL;
3519         }
3520
3521         if (read_only)
3522                 return 0;
3523
3524         return 0;
3525 }
3526
3527 void btrfs_error_commit_super(struct btrfs_root *root)
3528 {
3529         mutex_lock(&root->fs_info->cleaner_mutex);
3530         btrfs_run_delayed_iputs(root);
3531         mutex_unlock(&root->fs_info->cleaner_mutex);
3532
3533         down_write(&root->fs_info->cleanup_work_sem);
3534         up_write(&root->fs_info->cleanup_work_sem);
3535
3536         /* cleanup FS via transaction */
3537         btrfs_cleanup_transaction(root);
3538 }
3539
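/*
 * the btrfs_destroy_* helpers below run on the error/abort paths and
 * throw away state that a successful commit would normally consume.
 * This one empties the ordered_operations list and invalidates the
 * inodes of the roots that were queued on it.
 */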
3540 static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3541 {
3542         struct btrfs_inode *btrfs_inode;
3543         struct list_head splice;
3544
3545         INIT_LIST_HEAD(&splice);
3546
3547         mutex_lock(&root->fs_info->ordered_operations_mutex);
3548         spin_lock(&root->fs_info->ordered_extent_lock);
3549
3550         list_splice_init(&root->fs_info->ordered_operations, &splice);
3551         while (!list_empty(&splice)) {
3552                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3553                                          ordered_operations);
3554
3555                 list_del_init(&btrfs_inode->ordered_operations);
3556
3557                 btrfs_invalidate_inodes(btrfs_inode->root);
3558         }
3559
3560         spin_unlock(&root->fs_info->ordered_extent_lock);
3561         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3562 }
3563
3564 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3565 {
3566         struct list_head splice;
3567         struct btrfs_ordered_extent *ordered;
3568         struct inode *inode;
3569
3570         INIT_LIST_HEAD(&splice);
3571
3572         spin_lock(&root->fs_info->ordered_extent_lock);
3573
3574         list_splice_init(&root->fs_info->ordered_extents, &splice);
3575         while (!list_empty(&splice)) {
3576                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
3577                                      root_extent_list);
3578
3579                 list_del_init(&ordered->root_extent_list);
3580                 atomic_inc(&ordered->refs);
3581
3582                 /* the inode may be getting freed (in sys_unlink path). */
3583                 inode = igrab(ordered->inode);
3584
3585                 spin_unlock(&root->fs_info->ordered_extent_lock);
3586                 if (inode)
3587                         iput(inode);
3588
3589                 atomic_set(&ordered->refs, 1);
3590                 btrfs_put_ordered_extent(ordered);
3591
3592                 spin_lock(&root->fs_info->ordered_extent_lock);
3593         }
3594
3595         spin_unlock(&root->fs_info->ordered_extent_lock);
3596 }
3597
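/*
 * drop every entry in the delayed ref tree of an aborted transaction,
 * waiting for any ref head that is currently being processed before
 * freeing it.
 */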
3598 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3599                                struct btrfs_root *root)
3600 {
3601         struct rb_node *node;
3602         struct btrfs_delayed_ref_root *delayed_refs;
3603         struct btrfs_delayed_ref_node *ref;
3604         int ret = 0;
3605
3606         delayed_refs = &trans->delayed_refs;
3607
3608         spin_lock(&delayed_refs->lock);
3609         if (delayed_refs->num_entries == 0) {
3610                 spin_unlock(&delayed_refs->lock);
3611                 printk(KERN_INFO "delayed_refs has NO entry\n");
3612                 return ret;
3613         }
3614
3615         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3616                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3617
3618                 atomic_set(&ref->refs, 1);
3619                 if (btrfs_delayed_ref_is_head(ref)) {
3620                         struct btrfs_delayed_ref_head *head;
3621
3622                         head = btrfs_delayed_node_to_head(ref);
3623                         if (!mutex_trylock(&head->mutex)) {
3624                                 atomic_inc(&ref->refs);
3625                                 spin_unlock(&delayed_refs->lock);
3626
3627                                 /* Need to wait for the delayed ref to run */
3628                                 mutex_lock(&head->mutex);
3629                                 mutex_unlock(&head->mutex);
3630                                 btrfs_put_delayed_ref(ref);
3631
3632                                 spin_lock(&delayed_refs->lock);
3633                                 continue;
3634                         }
3635
3636                         btrfs_free_delayed_extent_op(head->extent_op);
3637                         delayed_refs->num_heads--;
3638                         if (list_empty(&head->cluster))
3639                                 delayed_refs->num_heads_ready--;
3640                         list_del_init(&head->cluster);
3641                 }
3642                 ref->in_tree = 0;
3643                 rb_erase(&ref->rb_node, &delayed_refs->root);
3644                 delayed_refs->num_entries--;
3645
3646                 spin_unlock(&delayed_refs->lock);
3647                 btrfs_put_delayed_ref(ref);
3648
3649                 cond_resched();
3650                 spin_lock(&delayed_refs->lock);
3651         }
3652
3653         spin_unlock(&delayed_refs->lock);
3654
3655         return ret;
3656 }
3657
3658 static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3659 {
3660         struct btrfs_pending_snapshot *snapshot;
3661         struct list_head splice;
3662
3663         INIT_LIST_HEAD(&splice);
3664
3665         list_splice_init(&t->pending_snapshots, &splice);
3666
3667         while (!list_empty(&splice)) {
3668                 snapshot = list_entry(splice.next,
3669                                       struct btrfs_pending_snapshot,
3670                                       list);
3671
3672                 list_del_init(&snapshot->list);
3673
3674                 kfree(snapshot);
3675         }
3676 }
3677
3678 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3679 {
3680         struct btrfs_inode *btrfs_inode;
3681         struct list_head splice;
3682
3683         INIT_LIST_HEAD(&splice);
3684
3685         spin_lock(&root->fs_info->delalloc_lock);
3686         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3687
3688         while (!list_empty(&splice)) {
3689                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3690                                     delalloc_inodes);
3691
3692                 list_del_init(&btrfs_inode->delalloc_inodes);
3693
3694                 btrfs_invalidate_inodes(btrfs_inode->root);
3695         }
3696
3697         spin_unlock(&root->fs_info->delalloc_lock);
3698 }
3699
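/*
 * clear the given mark from the dirty_pages tree and clean up the btree
 * pages and extent buffers in those ranges so they are no longer dirty
 * or under writeback.
 */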
3700 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3701                                         struct extent_io_tree *dirty_pages,
3702                                         int mark)
3703 {
3704         int ret;
3705         struct page *page;
3706         struct inode *btree_inode = root->fs_info->btree_inode;
3707         struct extent_buffer *eb;
3708         u64 start = 0;
3709         u64 end;
3710         u64 offset;
3711         unsigned long index;
3712
3713         while (1) {
3714                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3715                                             mark, NULL);
3716                 if (ret)
3717                         break;
3718
3719                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3720                 while (start <= end) {
3721                         index = start >> PAGE_CACHE_SHIFT;
3722                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
3723                         page = find_get_page(btree_inode->i_mapping, index);
3724                         if (!page)
3725                                 continue;
3726                         offset = page_offset(page);
3727
3728                         spin_lock(&dirty_pages->buffer_lock);
3729                         eb = radix_tree_lookup(
3730                              &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
3731                                                offset >> PAGE_CACHE_SHIFT);
3732                         spin_unlock(&dirty_pages->buffer_lock);
3733                         if (eb)
3734                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3735                                                          &eb->bflags);
3736                         if (PageWriteback(page))
3737                                 end_page_writeback(page);
3738
3739                         lock_page(page);
3740                         if (PageDirty(page)) {
3741                                 clear_page_dirty_for_io(page);
3742                                 spin_lock_irq(&page->mapping->tree_lock);
3743                                 radix_tree_tag_clear(&page->mapping->page_tree,
3744                                                         page_index(page),
3745                                                         PAGECACHE_TAG_DIRTY);
3746                                 spin_unlock_irq(&page->mapping->tree_lock);
3747                         }
3748
3749                         unlock_page(page);
3750                         page_cache_release(page);
3751                 }
3752         }
3753
3754         return ret;
3755 }
3756
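/*
 * walk both freed_extents trees, optionally discarding the ranges, and
 * unpin everything the aborted transaction had pinned.
 */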
3757 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3758                                        struct extent_io_tree *pinned_extents)
3759 {
3760         struct extent_io_tree *unpin;
3761         u64 start;
3762         u64 end;
3763         int ret;
3764         bool loop = true;
3765
3766         unpin = pinned_extents;
3767 again:
3768         while (1) {
3769                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3770                                             EXTENT_DIRTY, NULL);
3771                 if (ret)
3772                         break;
3773
3774                 /* discard the range if the discard mount option is set */
3775                 if (btrfs_test_opt(root, DISCARD))
3776                         ret = btrfs_error_discard_extent(root, start,
3777                                                          end + 1 - start,
3778                                                          NULL);
3779
3780                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3781                 btrfs_error_unpin_extent_range(root, start, end);
3782                 cond_resched();
3783         }
3784
3785         if (loop) {
3786                 if (unpin == &root->fs_info->freed_extents[0])
3787                         unpin = &root->fs_info->freed_extents[1];
3788                 else
3789                         unpin = &root->fs_info->freed_extents[0];
3790                 loop = false;
3791                 goto again;
3792         }
3793
3794         return 0;
3795 }
3796
3797 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3798                                    struct btrfs_root *root)
3799 {
3800         btrfs_destroy_delayed_refs(cur_trans, root);
3801         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3802                                 cur_trans->dirty_pages.dirty_bytes);
3803
3804         /* FIXME: cleanup wait for commit */
3805         cur_trans->in_commit = 1;
3806         cur_trans->blocked = 1;
3807         wake_up(&root->fs_info->transaction_blocked_wait);
3808
3809         cur_trans->blocked = 0;
3810         wake_up(&root->fs_info->transaction_wait);
3811
3812         cur_trans->commit_done = 1;
3813         wake_up(&cur_trans->commit_wait);
3814
3815         btrfs_destroy_delayed_inodes(root);
3816         btrfs_assert_delayed_root_empty(root);
3817
3818         btrfs_destroy_pending_snapshots(cur_trans);
3819
3820         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3821                                      EXTENT_DIRTY);
3822         btrfs_destroy_pinned_extent(root,
3823                                     root->fs_info->pinned_extents);
3824
3825         /*
3826         memset(cur_trans, 0, sizeof(*cur_trans));
3827         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3828         */
3829 }
3830
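/*
 * used on the error path (see btrfs_error_commit_super above): walk every
 * transaction still on trans_list and tear down its ordered extents,
 * delayed refs, delayed inodes, pending snapshots and dirty/pinned
 * extents, waking anyone waiting on the transaction as we go.
 */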
3831 int btrfs_cleanup_transaction(struct btrfs_root *root)
3832 {
3833         struct btrfs_transaction *t;
3834         LIST_HEAD(list);
3835
3836         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3837
3838         spin_lock(&root->fs_info->trans_lock);
3839         list_splice_init(&root->fs_info->trans_list, &list);
3840         root->fs_info->trans_no_join = 1;
3841         spin_unlock(&root->fs_info->trans_lock);
3842
3843         while (!list_empty(&list)) {
3844                 t = list_entry(list.next, struct btrfs_transaction, list);
3845                 if (!t)
3846                         break;
3847
3848                 btrfs_destroy_ordered_operations(root);
3849
3850                 btrfs_destroy_ordered_extents(root);
3851
3852                 btrfs_destroy_delayed_refs(t, root);
3853
3854                 btrfs_block_rsv_release(root,
3855                                         &root->fs_info->trans_block_rsv,
3856                                         t->dirty_pages.dirty_bytes);
3857
3858                 /* FIXME: cleanup wait for commit */
3859                 t->in_commit = 1;
3860                 t->blocked = 1;
3861                 smp_mb();
3862                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3863                         wake_up(&root->fs_info->transaction_blocked_wait);
3864
3865                 t->blocked = 0;
3866                 smp_mb();
3867                 if (waitqueue_active(&root->fs_info->transaction_wait))
3868                         wake_up(&root->fs_info->transaction_wait);
3869
3870                 t->commit_done = 1;
3871                 smp_mb();
3872                 if (waitqueue_active(&t->commit_wait))
3873                         wake_up(&t->commit_wait);
3874
3875                 btrfs_destroy_delayed_inodes(root);
3876                 btrfs_assert_delayed_root_empty(root);
3877
3878                 btrfs_destroy_pending_snapshots(t);
3879
3880                 btrfs_destroy_delalloc_inodes(root);
3881
3882                 spin_lock(&root->fs_info->trans_lock);
3883                 root->fs_info->running_transaction = NULL;
3884                 spin_unlock(&root->fs_info->trans_lock);
3885
3886                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3887                                              EXTENT_DIRTY);
3888
3889                 btrfs_destroy_pinned_extent(root,
3890                                             root->fs_info->pinned_extents);
3891
3892                 atomic_set(&t->use_count, 0);
3893                 list_del_init(&t->list);
3894                 memset(t, 0, sizeof(*t));
3895                 kmem_cache_free(btrfs_transaction_cachep, t);
3896         }
3897
3898         spin_lock(&root->fs_info->trans_lock);
3899         root->fs_info->trans_no_join = 0;
3900         spin_unlock(&root->fs_info->trans_lock);
3901         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3902
3903         return 0;
3904 }
3905
3906 static struct extent_io_ops btree_extent_io_ops = {
3907         .readpage_end_io_hook = btree_readpage_end_io_hook,
3908         .readpage_io_failed_hook = btree_io_failed_hook,
3909         .submit_bio_hook = btree_submit_bio_hook,
3910         /* note we're sharing with inode.c for the merge bio hook */
3911         .merge_bio_hook = btrfs_merge_bio_hook,
3912 };