btrfs: fix misleading variable name for flags
fs/btrfs/disk-io.c (linux-2.6-block.git)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <linux/uuid.h>
34 #include <asm/unaligned.h>
35 #include "compat.h"
36 #include "ctree.h"
37 #include "disk-io.h"
38 #include "transaction.h"
39 #include "btrfs_inode.h"
40 #include "volumes.h"
41 #include "print-tree.h"
42 #include "async-thread.h"
43 #include "locking.h"
44 #include "tree-log.h"
45 #include "free-space-cache.h"
46 #include "inode-map.h"
47 #include "check-integrity.h"
48 #include "rcu-string.h"
49 #include "dev-replace.h"
50 #include "raid56.h"
51
52 #ifdef CONFIG_X86
53 #include <asm/cpufeature.h>
54 #endif
55
56 static struct extent_io_ops btree_extent_io_ops;
57 static void end_workqueue_fn(struct btrfs_work *work);
58 static void free_fs_root(struct btrfs_root *root);
59 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
60                                     int read_only);
61 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
62                                              struct btrfs_root *root);
63 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
64 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
65                                       struct btrfs_root *root);
66 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
67 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
68 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
69                                         struct extent_io_tree *dirty_pages,
70                                         int mark);
71 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
72                                        struct extent_io_tree *pinned_extents);
73 static int btrfs_cleanup_transaction(struct btrfs_root *root);
74 static void btrfs_error_commit_super(struct btrfs_root *root);
75
76 /*
77  * end_io_wq structs are used to do processing in task context when an IO is
78  * complete.  This is used during reads to verify checksums, and it is used
79  * by writes to insert metadata for new file extents after IO is complete.
80  */
81 struct end_io_wq {
82         struct bio *bio;
83         bio_end_io_t *end_io;
84         void *private;
85         struct btrfs_fs_info *info;
86         int error;
87         int metadata;
88         struct list_head list;
89         struct btrfs_work work;
90 };
91
92 /*
93  * async submit bios are used to offload expensive checksumming
94  * onto the worker threads.  They checksum file and metadata bios
95  * just before they are sent down the IO stack.
96  */
97 struct async_submit_bio {
98         struct inode *inode;
99         struct bio *bio;
100         struct list_head list;
101         extent_submit_bio_hook_t *submit_bio_start;
102         extent_submit_bio_hook_t *submit_bio_done;
103         int rw;
104         int mirror_num;
105         unsigned long bio_flags;
106         /*
107          * bio_offset is optional, can be used if the pages in the bio
108          * can't tell us where in the file the bio should go
109          */
110         u64 bio_offset;
111         struct btrfs_work work;
112         int error;
113 };
114
115 /*
116  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
117  * eb, the lockdep key is determined by the btrfs_root it belongs to and
118  * the level the eb occupies in the tree.
119  *
120  * Different roots are used for different purposes and may nest inside each
121  * other and they require separate keysets.  As lockdep keys should be
122  * static, assign keysets according to the purpose of the root as indicated
123  * by btrfs_root->objectid.  This ensures that all special purpose roots
124  * have separate keysets.
125  *
126  * Lock-nesting across peer nodes is always done with the immediate parent
127  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
128  * subclass to avoid triggering lockdep warning in such cases.
129  *
130  * The key is set by the readpage_end_io_hook after the buffer has passed
131  * csum validation but before the pages are unlocked.  It is also set by
132  * btrfs_init_new_buffer on freshly allocated blocks.
133  *
134  * We also add a check to make sure the highest level of the tree is the
135  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
136  * needs updating as well.
137  */
138 #ifdef CONFIG_DEBUG_LOCK_ALLOC
139 # if BTRFS_MAX_LEVEL != 8
140 #  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets"
141 # endif
142
143 static struct btrfs_lockdep_keyset {
144         u64                     id;             /* root objectid */
145         const char              *name_stem;     /* lock name stem */
146         char                    names[BTRFS_MAX_LEVEL + 1][20];
147         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
148 } btrfs_lockdep_keysets[] = {
149         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
150         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
151         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
152         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
153         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
154         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
155         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
156         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
157         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
158         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
159         { .id = 0,                              .name_stem = "tree"     },
160 };
161
162 void __init btrfs_init_lockdep(void)
163 {
164         int i, j;
165
166         /* initialize lockdep class names */
167         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
168                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
169
170                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
171                         snprintf(ks->names[j], sizeof(ks->names[j]),
172                                  "btrfs-%s-%02d", ks->name_stem, j);
173         }
174 }
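
/*
 * For illustration: with the "btrfs-%s-%02d" format above, the generated
 * lock class names come out as "btrfs-root-00" .. "btrfs-root-08",
 * "btrfs-extent-00" .. and so on, one name per tree level, with
 * "btrfs-tree-NN" as the catch-all for roots not listed in the table.
 */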
175
176 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
177                                     int level)
178 {
179         struct btrfs_lockdep_keyset *ks;
180
181         BUG_ON(level >= ARRAY_SIZE(ks->keys));
182
183         /* find the matching keyset, id 0 is the default entry */
184         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
185                 if (ks->id == objectid)
186                         break;
187
188         lockdep_set_class_and_name(&eb->lock,
189                                    &ks->keys[level], ks->names[level]);
190 }
191
192 #endif
193
194 /*
195  * extents on the btree inode are pretty simple, there's one extent
196  * that covers the entire device
197  */
198 static struct extent_map *btree_get_extent(struct inode *inode,
199                 struct page *page, size_t pg_offset, u64 start, u64 len,
200                 int create)
201 {
202         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
203         struct extent_map *em;
204         int ret;
205
206         read_lock(&em_tree->lock);
207         em = lookup_extent_mapping(em_tree, start, len);
208         if (em) {
209                 em->bdev =
210                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
211                 read_unlock(&em_tree->lock);
212                 goto out;
213         }
214         read_unlock(&em_tree->lock);
215
216         em = alloc_extent_map();
217         if (!em) {
218                 em = ERR_PTR(-ENOMEM);
219                 goto out;
220         }
221         em->start = 0;
222         em->len = (u64)-1;
223         em->block_len = (u64)-1;
224         em->block_start = 0;
225         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
226
227         write_lock(&em_tree->lock);
228         ret = add_extent_mapping(em_tree, em, 0);
229         if (ret == -EEXIST) {
230                 free_extent_map(em);
231                 em = lookup_extent_mapping(em_tree, start, len);
232                 if (!em)
233                         em = ERR_PTR(-EIO);
234         } else if (ret) {
235                 free_extent_map(em);
236                 em = ERR_PTR(ret);
237         }
238         write_unlock(&em_tree->lock);
239
240 out:
241         return em;
242 }
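
/*
 * Illustration: the mapping installed above spans [0, (u64)-1), so any
 * btree_get_extent() lookup on the btree inode resolves to this single
 * identity extent backed by the latest bdev.
 */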
243
244 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
245 {
246         return crc32c(seed, data, len);
247 }
248
249 void btrfs_csum_final(u32 crc, char *result)
250 {
251         put_unaligned_le32(~crc, result);
252 }
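
/*
 * Usage sketch (illustrative only): the checksum convention used below in
 * csum_tree_block() is a crc32c seeded with all ones, inverted when done
 * and stored little-endian, i.e. roughly:
 *
 *	u32 crc = ~(u32)0;
 *	char csum[BTRFS_CSUM_SIZE];
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, csum);
 */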
253
254 /*
255  * compute the csum for a btree block, and either verify it or write it
256  * into the csum field of the block.
257  */
258 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
259                            int verify)
260 {
261         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
262         char *result = NULL;
263         unsigned long len;
264         unsigned long cur_len;
265         unsigned long offset = BTRFS_CSUM_SIZE;
266         char *kaddr;
267         unsigned long map_start;
268         unsigned long map_len;
269         int err;
270         u32 crc = ~(u32)0;
271         unsigned long inline_result;
272
273         len = buf->len - offset;
274         while (len > 0) {
275                 err = map_private_extent_buffer(buf, offset, 32,
276                                         &kaddr, &map_start, &map_len);
277                 if (err)
278                         return 1;
279                 cur_len = min(len, map_len - (offset - map_start));
280                 crc = btrfs_csum_data(kaddr + offset - map_start,
281                                       crc, cur_len);
282                 len -= cur_len;
283                 offset += cur_len;
284         }
285         if (csum_size > sizeof(inline_result)) {
286                 result = kzalloc(csum_size, GFP_NOFS);
287                 if (!result)
288                         return 1;
289         } else {
290                 result = (char *)&inline_result;
291         }
292
293         btrfs_csum_final(crc, result);
294
295         if (verify) {
296                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
297                         u32 val;
298                         u32 found = 0;
299                         memcpy(&found, result, csum_size);
300
301                         read_extent_buffer(buf, &val, 0, csum_size);
302                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
303                                        "failed on %llu wanted %X found %X "
304                                        "level %d\n",
305                                        root->fs_info->sb->s_id,
306                                        (unsigned long long)buf->start, val, found,
307                                        btrfs_header_level(buf));
308                         if (result != (char *)&inline_result)
309                                 kfree(result);
310                         return 1;
311                 }
312         } else {
313                 write_extent_buffer(buf, result, 0, csum_size);
314         }
315         if (result != (char *)&inline_result)
316                 kfree(result);
317         return 0;
318 }
319
320 /*
321  * we can't consider a given block up to date unless the transid of the
322  * block matches the transid in the parent node's pointer.  This is how we
323  * detect blocks that either didn't get written at all or got written
324  * in the wrong place.
325  */
326 static int verify_parent_transid(struct extent_io_tree *io_tree,
327                                  struct extent_buffer *eb, u64 parent_transid,
328                                  int atomic)
329 {
330         struct extent_state *cached_state = NULL;
331         int ret;
332
333         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
334                 return 0;
335
336         if (atomic)
337                 return -EAGAIN;
338
339         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
340                          0, &cached_state);
341         if (extent_buffer_uptodate(eb) &&
342             btrfs_header_generation(eb) == parent_transid) {
343                 ret = 0;
344                 goto out;
345         }
346         printk_ratelimited(KERN_ERR "parent transid verify failed on %llu wanted %llu "
347                        "found %llu\n",
348                        (unsigned long long)eb->start,
349                        (unsigned long long)parent_transid,
350                        (unsigned long long)btrfs_header_generation(eb));
351         ret = 1;
352         clear_extent_buffer_uptodate(eb);
353 out:
354         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
355                              &cached_state, GFP_NOFS);
356         return ret;
357 }
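
/*
 * Worked example: if the parent node recorded transid 102 in its pointer
 * but this block's header still says generation 100, the write for
 * transid 102 never reached disk (or landed in the wrong place), so the
 * buffer is marked not uptodate and the caller can try another mirror.
 */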
358
359 /*
360  * helper to read a given tree block, doing retries as required when
361  * the checksums don't match and we have alternate mirrors to try.
362  */
363 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
364                                           struct extent_buffer *eb,
365                                           u64 start, u64 parent_transid)
366 {
367         struct extent_io_tree *io_tree;
368         int failed = 0;
369         int ret;
370         int num_copies = 0;
371         int mirror_num = 0;
372         int failed_mirror = 0;
373
374         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
375         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
376         while (1) {
377                 ret = read_extent_buffer_pages(io_tree, eb, start,
378                                                WAIT_COMPLETE,
379                                                btree_get_extent, mirror_num);
380                 if (!ret) {
381                         if (!verify_parent_transid(io_tree, eb,
382                                                    parent_transid, 0))
383                                 break;
384                         else
385                                 ret = -EIO;
386                 }
387
388                 /*
389                  * This buffer's crc is fine, but its contents are corrupted, so
390                  * there is no reason to read the other copies, they won't be
391                  * any less wrong.
392                  */
393                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
394                         break;
395
396                 num_copies = btrfs_num_copies(root->fs_info,
397                                               eb->start, eb->len);
398                 if (num_copies == 1)
399                         break;
400
401                 if (!failed_mirror) {
402                         failed = 1;
403                         failed_mirror = eb->read_mirror;
404                 }
405
406                 mirror_num++;
407                 if (mirror_num == failed_mirror)
408                         mirror_num++;
409
410                 if (mirror_num > num_copies)
411                         break;
412         }
413
414         if (failed && !ret && failed_mirror)
415                 repair_eb_io_failure(root, eb, failed_mirror);
416
417         return ret;
418 }
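
/*
 * Retry trace (illustrative): with num_copies == 2 and a bad first read
 * from mirror 1, the loop above records failed_mirror = 1, advances
 * mirror_num to 2 (skipping the failed one), reads the second copy and,
 * if that copy passes, repairs the bad mirror via repair_eb_io_failure().
 */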
419
420 /*
421  * checksum a dirty tree block before IO.  This has extra checks to make sure
422  * we only fill in the checksum field in the first page of a multi-page block
423  */
424
425 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
426 {
427         struct extent_io_tree *tree;
428         u64 start = page_offset(page);
429         u64 found_start;
430         struct extent_buffer *eb;
431
432         tree = &BTRFS_I(page->mapping->host)->io_tree;
433
434         eb = (struct extent_buffer *)page->private;
435         if (page != eb->pages[0])
436                 return 0;
437         found_start = btrfs_header_bytenr(eb);
438         if (found_start != start) {
439                 WARN_ON(1);
440                 return 0;
441         }
442         if (!PageUptodate(page)) {
443                 WARN_ON(1);
444                 return 0;
445         }
446         csum_tree_block(root, eb, 0);
447         return 0;
448 }
449
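/*
 * Make sure the fsid in this tree block's header matches the fsid of the
 * filesystem or of one of the seed filesystems it was grown from;
 * returns 0 on a match.
 */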
450 static int check_tree_block_fsid(struct btrfs_root *root,
451                                  struct extent_buffer *eb)
452 {
453         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
454         u8 fsid[BTRFS_UUID_SIZE];
455         int ret = 1;
456
457         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
458                            BTRFS_FSID_SIZE);
459         while (fs_devices) {
460                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
461                         ret = 0;
462                         break;
463                 }
464                 fs_devices = fs_devices->seed;
465         }
466         return ret;
467 }
468
469 #define CORRUPT(reason, eb, root, slot)                         \
470         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
471                "root=%llu, slot=%d\n", reason,                  \
472                (unsigned long long)btrfs_header_bytenr(eb),     \
473                (unsigned long long)root->objectid, slot)
474
475 static noinline int check_leaf(struct btrfs_root *root,
476                                struct extent_buffer *leaf)
477 {
478         struct btrfs_key key;
479         struct btrfs_key leaf_key;
480         u32 nritems = btrfs_header_nritems(leaf);
481         int slot;
482
483         if (nritems == 0)
484                 return 0;
485
486         /* Check the 0 item */
487         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
488             BTRFS_LEAF_DATA_SIZE(root)) {
489                 CORRUPT("invalid item offset size pair", leaf, root, 0);
490                 return -EIO;
491         }
492
493         /*
494          * Check to make sure each item's keys are in the correct order and their
495          * offsets make sense.  We only have to loop through nritems-1 because
496          * we check the current slot against the next slot, which verifies that
497          * the next slot's offset+size makes sense and that the current slot's
498          * offset is correct.
499          */
500         for (slot = 0; slot < nritems - 1; slot++) {
501                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
502                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
503
504                 /* Make sure the keys are in the right order */
505                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
506                         CORRUPT("bad key order", leaf, root, slot);
507                         return -EIO;
508                 }
509
510                 /*
511                  * Make sure the offset and ends are right, remember that the
512                  * item data starts at the end of the leaf and grows towards the
513                  * front.
514                  */
515                 if (btrfs_item_offset_nr(leaf, slot) !=
516                         btrfs_item_end_nr(leaf, slot + 1)) {
517                         CORRUPT("slot offset bad", leaf, root, slot);
518                         return -EIO;
519                 }
520
521                 /*
522                  * Check to make sure that we don't point outside of the leaf,
523          * just in case all the items are consistent with each other, but
524                  * all point outside of the leaf.
525                  */
526                 if (btrfs_item_end_nr(leaf, slot) >
527                     BTRFS_LEAF_DATA_SIZE(root)) {
528                         CORRUPT("slot end outside of leaf", leaf, root, slot);
529                         return -EIO;
530                 }
531         }
532
533         return 0;
534 }
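
/*
 * Leaf layout sketch (for the checks above): item headers grow forward
 * from the start of the leaf while item data grows backward from the end,
 * so for adjacent slots item_offset(n) == item_end(n + 1):
 *
 *	[leaf hdr][item 0][item 1] ... free space ... [data 1][data 0]
 */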
535
536 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
537                                struct extent_state *state, int mirror)
538 {
539         struct extent_io_tree *tree;
540         u64 found_start;
541         int found_level;
542         struct extent_buffer *eb;
543         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
544         int ret = 0;
545         int reads_done;
546
547         if (!page->private)
548                 goto out;
549
550         tree = &BTRFS_I(page->mapping->host)->io_tree;
551         eb = (struct extent_buffer *)page->private;
552
553         /* the pending IO might have been the only thing that kept this buffer
554          * in memory.  Make sure we have a ref for all these other checks
555          */
556         extent_buffer_get(eb);
557
558         reads_done = atomic_dec_and_test(&eb->io_pages);
559         if (!reads_done)
560                 goto err;
561
562         eb->read_mirror = mirror;
563         if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
564                 ret = -EIO;
565                 goto err;
566         }
567
568         found_start = btrfs_header_bytenr(eb);
569         if (found_start != eb->start) {
570                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
571                                "%llu %llu\n",
572                                (unsigned long long)found_start,
573                                (unsigned long long)eb->start);
574                 ret = -EIO;
575                 goto err;
576         }
577         if (check_tree_block_fsid(root, eb)) {
578                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
579                                (unsigned long long)eb->start);
580                 ret = -EIO;
581                 goto err;
582         }
583         found_level = btrfs_header_level(eb);
584         if (found_level >= BTRFS_MAX_LEVEL) {
585                 btrfs_info(root->fs_info, "bad tree block level %d",
586                            found_level);
587                 ret = -EIO;
588                 goto err;
589         }
590
591         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
592                                        eb, found_level);
593
594         ret = csum_tree_block(root, eb, 1);
595         if (ret) {
596                 ret = -EIO;
597                 goto err;
598         }
599
600         /*
601          * If this is a leaf block and it is corrupt, set the corrupt bit so
602          * that we don't try and read the other copies of this block, just
603          * return -EIO.
604          */
605         if (found_level == 0 && check_leaf(root, eb)) {
606                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
607                 ret = -EIO;
608         }
609
610         if (!ret)
611                 set_extent_buffer_uptodate(eb);
612 err:
613         if (reads_done &&
614             test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
615                 btree_readahead_hook(root, eb, eb->start, ret);
616
617         if (ret) {
618                 /*
619                  * our io error hook is going to dec the io pages
620                  * again, we have to make sure it has something
621                  * to decrement
622                  */
623                 atomic_inc(&eb->io_pages);
624                 clear_extent_buffer_uptodate(eb);
625         }
626         free_extent_buffer(eb);
627 out:
628         return ret;
629 }
630
631 static int btree_io_failed_hook(struct page *page, int failed_mirror)
632 {
633         struct extent_buffer *eb;
634         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
635
636         eb = (struct extent_buffer *)page->private;
637         set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
638         eb->read_mirror = failed_mirror;
639         atomic_dec(&eb->io_pages);
640         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
641                 btree_readahead_hook(root, eb, eb->start, -EIO);
642         return -EIO;    /* we fixed nothing */
643 }
644
645 static void end_workqueue_bio(struct bio *bio, int err)
646 {
647         struct end_io_wq *end_io_wq = bio->bi_private;
648         struct btrfs_fs_info *fs_info;
649
650         fs_info = end_io_wq->info;
651         end_io_wq->error = err;
652         end_io_wq->work.func = end_workqueue_fn;
653         end_io_wq->work.flags = 0;
654
655         if (bio->bi_rw & REQ_WRITE) {
656                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
657                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
658                                            &end_io_wq->work);
659                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
660                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
661                                            &end_io_wq->work);
662                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
663                         btrfs_queue_worker(&fs_info->endio_raid56_workers,
664                                            &end_io_wq->work);
665                 else
666                         btrfs_queue_worker(&fs_info->endio_write_workers,
667                                            &end_io_wq->work);
668         } else {
669                 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
670                         btrfs_queue_worker(&fs_info->endio_raid56_workers,
671                                            &end_io_wq->work);
672                 else if (end_io_wq->metadata)
673                         btrfs_queue_worker(&fs_info->endio_meta_workers,
674                                            &end_io_wq->work);
675                 else
676                         btrfs_queue_worker(&fs_info->endio_workers,
677                                            &end_io_wq->work);
678         }
679 }
680
681 /*
682  * For the metadata arg you want
683  *
684  * 0 - if data
685  * 1 - if normal metadata
686  * 2 - if writing to the free space cache area
687  * 3 - raid parity work
688  */
689 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
690                         int metadata)
691 {
692         struct end_io_wq *end_io_wq;
693         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
694         if (!end_io_wq)
695                 return -ENOMEM;
696
697         end_io_wq->private = bio->bi_private;
698         end_io_wq->end_io = bio->bi_end_io;
699         end_io_wq->info = info;
700         end_io_wq->error = 0;
701         end_io_wq->bio = bio;
702         end_io_wq->metadata = metadata;
703
704         bio->bi_private = end_io_wq;
705         bio->bi_end_io = end_workqueue_bio;
706         return 0;
707 }
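
/*
 * Usage sketch (illustrative): a submitter hooks the bio before mapping
 * it so the completion runs in task context, e.g. for a metadata read as
 * btree_submit_bio_hook() does below:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, 1);
 *	if (!ret)
 *		ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
 */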
708
709 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
710 {
711         unsigned long limit = min_t(unsigned long,
712                                     info->workers.max_workers,
713                                     info->fs_devices->open_devices);
714         return 256 * limit;
715 }
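
/*
 * Worked example: with 8 worker threads and 4 open devices the limit is
 * 256 * min(8, 4) = 1024 async bios in flight.
 */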
716
717 static void run_one_async_start(struct btrfs_work *work)
718 {
719         struct async_submit_bio *async;
720         int ret;
721
722         async = container_of(work, struct async_submit_bio, work);
723         ret = async->submit_bio_start(async->inode, async->rw, async->bio,
724                                       async->mirror_num, async->bio_flags,
725                                       async->bio_offset);
726         if (ret)
727                 async->error = ret;
728 }
729
730 static void run_one_async_done(struct btrfs_work *work)
731 {
732         struct btrfs_fs_info *fs_info;
733         struct async_submit_bio *async;
734         int limit;
735
736         async = container_of(work, struct async_submit_bio, work);
737         fs_info = BTRFS_I(async->inode)->root->fs_info;
738
739         limit = btrfs_async_submit_limit(fs_info);
740         limit = limit * 2 / 3;
741
742         if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
743             waitqueue_active(&fs_info->async_submit_wait))
744                 wake_up(&fs_info->async_submit_wait);
745
746         /* If an error occurred we just want to clean up the bio and move on */
747         if (async->error) {
748                 bio_endio(async->bio, async->error);
749                 return;
750         }
751
752         async->submit_bio_done(async->inode, async->rw, async->bio,
753                                async->mirror_num, async->bio_flags,
754                                async->bio_offset);
755 }
756
757 static void run_one_async_free(struct btrfs_work *work)
758 {
759         struct async_submit_bio *async;
760
761         async = container_of(work, struct async_submit_bio, work);
762         kfree(async);
763 }
764
765 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
766                         int rw, struct bio *bio, int mirror_num,
767                         unsigned long bio_flags,
768                         u64 bio_offset,
769                         extent_submit_bio_hook_t *submit_bio_start,
770                         extent_submit_bio_hook_t *submit_bio_done)
771 {
772         struct async_submit_bio *async;
773
774         async = kmalloc(sizeof(*async), GFP_NOFS);
775         if (!async)
776                 return -ENOMEM;
777
778         async->inode = inode;
779         async->rw = rw;
780         async->bio = bio;
781         async->mirror_num = mirror_num;
782         async->submit_bio_start = submit_bio_start;
783         async->submit_bio_done = submit_bio_done;
784
785         async->work.func = run_one_async_start;
786         async->work.ordered_func = run_one_async_done;
787         async->work.ordered_free = run_one_async_free;
788
789         async->work.flags = 0;
790         async->bio_flags = bio_flags;
791         async->bio_offset = bio_offset;
792
793         async->error = 0;
794
795         atomic_inc(&fs_info->nr_async_submits);
796
797         if (rw & REQ_SYNC)
798                 btrfs_set_work_high_prio(&async->work);
799
800         btrfs_queue_worker(&fs_info->workers, &async->work);
801
802         while (atomic_read(&fs_info->async_submit_draining) &&
803               atomic_read(&fs_info->nr_async_submits)) {
804                 wait_event(fs_info->async_submit_wait,
805                            (atomic_read(&fs_info->nr_async_submits) == 0));
806         }
807
808         return 0;
809 }
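
/*
 * Ordering note (per the btrfs_work fields set above): run_one_async_start
 * runs as the regular work function so checksumming happens in parallel,
 * run_one_async_done runs as the ordered function in queueing order to
 * submit bios in the order they arrived, and run_one_async_free runs last
 * to release the async_submit_bio.
 */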
810
811 static int btree_csum_one_bio(struct bio *bio)
812 {
813         struct bio_vec *bvec = bio->bi_io_vec;
814         int bio_index = 0;
815         struct btrfs_root *root;
816         int ret = 0;
817
818         WARN_ON(bio->bi_vcnt <= 0);
819         while (bio_index < bio->bi_vcnt) {
820                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
821                 ret = csum_dirty_buffer(root, bvec->bv_page);
822                 if (ret)
823                         break;
824                 bio_index++;
825                 bvec++;
826         }
827         return ret;
828 }
829
830 static int __btree_submit_bio_start(struct inode *inode, int rw,
831                                     struct bio *bio, int mirror_num,
832                                     unsigned long bio_flags,
833                                     u64 bio_offset)
834 {
835         /* when we're called for a write, we're already in the async
836          * submission context.  Just checksum the bio here; the actual
837          * mapping and submission happens in __btree_submit_bio_done
838          */
839         return btree_csum_one_bio(bio);
840 }
841
842 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
843                                  int mirror_num, unsigned long bio_flags,
844                                  u64 bio_offset)
845 {
846         int ret;
847
848         /*
849          * when we're called for a write, we're already in the async
850          * submission context.  Just jump into btrfs_map_bio
851          */
852         ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
853         if (ret)
854                 bio_endio(bio, ret);
855         return ret;
856 }
857
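/*
 * Decide whether a btree write needs the async checksumming helpers:
 * tree log bios are checksummed inline, and CPUs with hardware crc32c
 * (SSE4.2) are fast enough to checksum inline as well.
 */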
858 static int check_async_write(struct inode *inode, unsigned long bio_flags)
859 {
860         if (bio_flags & EXTENT_BIO_TREE_LOG)
861                 return 0;
862 #ifdef CONFIG_X86
863         if (cpu_has_xmm4_2)
864                 return 0;
865 #endif
866         return 1;
867 }
868
869 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
870                                  int mirror_num, unsigned long bio_flags,
871                                  u64 bio_offset)
872 {
873         int async = check_async_write(inode, bio_flags);
874         int ret;
875
876         if (!(rw & REQ_WRITE)) {
877                 /*
878                  * called for a read, do the setup so that checksum validation
879                  * can happen in the async kernel threads
880                  */
881                 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
882                                           bio, 1);
883                 if (ret)
884                         goto out_w_error;
885                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
886                                     mirror_num, 0);
887         } else if (!async) {
888                 ret = btree_csum_one_bio(bio);
889                 if (ret)
890                         goto out_w_error;
891                 ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
892                                     mirror_num, 0);
893         } else {
894                 /*
895                  * kthread helpers are used to submit writes so that
896                  * checksumming can happen in parallel across all CPUs
897                  */
898                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
899                                           inode, rw, bio, mirror_num, 0,
900                                           bio_offset,
901                                           __btree_submit_bio_start,
902                                           __btree_submit_bio_done);
903         }
904
905         if (ret) {
906 out_w_error:
907                 bio_endio(bio, ret);
908         }
909         return ret;
910 }
911
912 #ifdef CONFIG_MIGRATION
913 static int btree_migratepage(struct address_space *mapping,
914                         struct page *newpage, struct page *page,
915                         enum migrate_mode mode)
916 {
917         /*
918          * we can't safely write a btree page from here,
919          * we haven't done the locking hook
920          */
921         if (PageDirty(page))
922                 return -EAGAIN;
923         /*
924          * Buffers may be managed in a filesystem specific way.
925          * We must have no buffers or drop them.
926          */
927         if (page_has_private(page) &&
928             !try_to_release_page(page, GFP_KERNEL))
929                 return -EAGAIN;
930         return migrate_page(mapping, newpage, page, mode);
931 }
932 #endif
933
934
935 static int btree_writepages(struct address_space *mapping,
936                             struct writeback_control *wbc)
937 {
938         struct extent_io_tree *tree;
939         struct btrfs_fs_info *fs_info;
940         int ret;
941
942         tree = &BTRFS_I(mapping->host)->io_tree;
943         if (wbc->sync_mode == WB_SYNC_NONE) {
944
945                 if (wbc->for_kupdate)
946                         return 0;
947
948                 fs_info = BTRFS_I(mapping->host)->root->fs_info;
949                 /* this is a bit racy, but that's ok */
950                 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
951                                              BTRFS_DIRTY_METADATA_THRESH);
952                 if (ret < 0)
953                         return 0;
954         }
955         return btree_write_cache_pages(mapping, wbc);
956 }
957
958 static int btree_readpage(struct file *file, struct page *page)
959 {
960         struct extent_io_tree *tree;
961         tree = &BTRFS_I(page->mapping->host)->io_tree;
962         return extent_read_full_page(tree, page, btree_get_extent, 0);
963 }
964
965 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
966 {
967         if (PageWriteback(page) || PageDirty(page))
968                 return 0;
969
970         return try_release_extent_buffer(page);
971 }
972
973 static void btree_invalidatepage(struct page *page, unsigned long offset)
974 {
975         struct extent_io_tree *tree;
976         tree = &BTRFS_I(page->mapping->host)->io_tree;
977         extent_invalidatepage(tree, page, offset);
978         btree_releasepage(page, GFP_NOFS);
979         if (PagePrivate(page)) {
980                 printk(KERN_WARNING "btrfs warning: page private not zero "
981                        "on page %llu\n", (unsigned long long)page_offset(page));
982                 ClearPagePrivate(page);
983                 set_page_private(page, 0);
984                 page_cache_release(page);
985         }
986 }
987
988 static int btree_set_page_dirty(struct page *page)
989 {
990 #ifdef DEBUG
991         struct extent_buffer *eb;
992
993         BUG_ON(!PagePrivate(page));
994         eb = (struct extent_buffer *)page->private;
995         BUG_ON(!eb);
996         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
997         BUG_ON(!atomic_read(&eb->refs));
998         btrfs_assert_tree_locked(eb);
999 #endif
1000         return __set_page_dirty_nobuffers(page);
1001 }
1002
1003 static const struct address_space_operations btree_aops = {
1004         .readpage       = btree_readpage,
1005         .writepages     = btree_writepages,
1006         .releasepage    = btree_releasepage,
1007         .invalidatepage = btree_invalidatepage,
1008 #ifdef CONFIG_MIGRATION
1009         .migratepage    = btree_migratepage,
1010 #endif
1011         .set_page_dirty = btree_set_page_dirty,
1012 };
1013
1014 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1015                          u64 parent_transid)
1016 {
1017         struct extent_buffer *buf = NULL;
1018         struct inode *btree_inode = root->fs_info->btree_inode;
1019         int ret = 0;
1020
1021         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1022         if (!buf)
1023                 return 0;
1024         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1025                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
1026         free_extent_buffer(buf);
1027         return ret;
1028 }
1029
1030 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1031                          int mirror_num, struct extent_buffer **eb)
1032 {
1033         struct extent_buffer *buf = NULL;
1034         struct inode *btree_inode = root->fs_info->btree_inode;
1035         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
1036         int ret;
1037
1038         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1039         if (!buf)
1040                 return 0;
1041
1042         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1043
1044         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1045                                        btree_get_extent, mirror_num);
1046         if (ret) {
1047                 free_extent_buffer(buf);
1048                 return ret;
1049         }
1050
1051         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1052                 free_extent_buffer(buf);
1053                 return -EIO;
1054         } else if (extent_buffer_uptodate(buf)) {
1055                 *eb = buf;
1056         } else {
1057                 free_extent_buffer(buf);
1058         }
1059         return 0;
1060 }
1061
1062 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1063                                             u64 bytenr, u32 blocksize)
1064 {
1065         struct inode *btree_inode = root->fs_info->btree_inode;
1066         struct extent_buffer *eb;
1067         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1068                                 bytenr, blocksize);
1069         return eb;
1070 }
1071
1072 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1073                                                  u64 bytenr, u32 blocksize)
1074 {
1075         struct inode *btree_inode = root->fs_info->btree_inode;
1076         struct extent_buffer *eb;
1077
1078         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1079                                  bytenr, blocksize);
1080         return eb;
1081 }
1082
1083
1084 int btrfs_write_tree_block(struct extent_buffer *buf)
1085 {
1086         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1087                                         buf->start + buf->len - 1);
1088 }
1089
1090 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1091 {
1092         return filemap_fdatawait_range(buf->pages[0]->mapping,
1093                                        buf->start, buf->start + buf->len - 1);
1094 }
1095
1096 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1097                                       u32 blocksize, u64 parent_transid)
1098 {
1099         struct extent_buffer *buf = NULL;
1100         int ret;
1101
1102         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1103         if (!buf)
1104                 return NULL;
1105
1106         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1107         return buf;
1108
1109 }
1110
1111 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1112                       struct extent_buffer *buf)
1113 {
1114         struct btrfs_fs_info *fs_info = root->fs_info;
1115
1116         if (btrfs_header_generation(buf) ==
1117             fs_info->running_transaction->transid) {
1118                 btrfs_assert_tree_locked(buf);
1119
1120                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1121                         __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1122                                              -buf->len,
1123                                              fs_info->dirty_metadata_batch);
1124                         /* ugh, clear_extent_buffer_dirty needs to lock the page */
1125                         btrfs_set_lock_blocking(buf);
1126                         clear_extent_buffer_dirty(buf);
1127                 }
1128         }
1129 }
1130
1131 static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1132                          u32 stripesize, struct btrfs_root *root,
1133                          struct btrfs_fs_info *fs_info,
1134                          u64 objectid)
1135 {
1136         root->node = NULL;
1137         root->commit_root = NULL;
1138         root->sectorsize = sectorsize;
1139         root->nodesize = nodesize;
1140         root->leafsize = leafsize;
1141         root->stripesize = stripesize;
1142         root->ref_cows = 0;
1143         root->track_dirty = 0;
1144         root->in_radix = 0;
1145         root->orphan_item_inserted = 0;
1146         root->orphan_cleanup_state = 0;
1147
1148         root->objectid = objectid;
1149         root->last_trans = 0;
1150         root->highest_objectid = 0;
1151         root->name = NULL;
1152         root->inode_tree = RB_ROOT;
1153         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1154         root->block_rsv = NULL;
1155         root->orphan_block_rsv = NULL;
1156
1157         INIT_LIST_HEAD(&root->dirty_list);
1158         INIT_LIST_HEAD(&root->root_list);
1159         INIT_LIST_HEAD(&root->logged_list[0]);
1160         INIT_LIST_HEAD(&root->logged_list[1]);
1161         spin_lock_init(&root->orphan_lock);
1162         spin_lock_init(&root->inode_lock);
1163         spin_lock_init(&root->accounting_lock);
1164         spin_lock_init(&root->log_extents_lock[0]);
1165         spin_lock_init(&root->log_extents_lock[1]);
1166         mutex_init(&root->objectid_mutex);
1167         mutex_init(&root->log_mutex);
1168         init_waitqueue_head(&root->log_writer_wait);
1169         init_waitqueue_head(&root->log_commit_wait[0]);
1170         init_waitqueue_head(&root->log_commit_wait[1]);
1171         atomic_set(&root->log_commit[0], 0);
1172         atomic_set(&root->log_commit[1], 0);
1173         atomic_set(&root->log_writers, 0);
1174         atomic_set(&root->log_batch, 0);
1175         atomic_set(&root->orphan_inodes, 0);
1176         root->log_transid = 0;
1177         root->last_log_commit = 0;
1178         extent_io_tree_init(&root->dirty_log_pages,
1179                              fs_info->btree_inode->i_mapping);
1180
1181         memset(&root->root_key, 0, sizeof(root->root_key));
1182         memset(&root->root_item, 0, sizeof(root->root_item));
1183         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1184         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1185         root->defrag_trans_start = fs_info->generation;
1186         init_completion(&root->kobj_unregister);
1187         root->defrag_running = 0;
1188         root->root_key.objectid = objectid;
1189         root->anon_dev = 0;
1190
1191         spin_lock_init(&root->root_item_lock);
1192 }
1193
1194 static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1195                                             struct btrfs_fs_info *fs_info,
1196                                             u64 objectid,
1197                                             struct btrfs_root *root)
1198 {
1199         int ret;
1200         u32 blocksize;
1201         u64 generation;
1202
1203         __setup_root(tree_root->nodesize, tree_root->leafsize,
1204                      tree_root->sectorsize, tree_root->stripesize,
1205                      root, fs_info, objectid);
1206         ret = btrfs_find_last_root(tree_root, objectid,
1207                                    &root->root_item, &root->root_key);
1208         if (ret > 0)
1209                 return -ENOENT;
1210         else if (ret < 0)
1211                 return ret;
1212
1213         generation = btrfs_root_generation(&root->root_item);
1214         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1215         root->commit_root = NULL;
1216         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1217                                      blocksize, generation);
1218         if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
1219                 free_extent_buffer(root->node);
1220                 root->node = NULL;
1221                 return -EIO;
1222         }
1223         root->commit_root = btrfs_root_node(root);
1224         return 0;
1225 }
1226
1227 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1228 {
1229         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1230         if (root)
1231                 root->fs_info = fs_info;
1232         return root;
1233 }
1234
1235 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
1236                                      struct btrfs_fs_info *fs_info,
1237                                      u64 objectid)
1238 {
1239         struct extent_buffer *leaf;
1240         struct btrfs_root *tree_root = fs_info->tree_root;
1241         struct btrfs_root *root;
1242         struct btrfs_key key;
1243         int ret = 0;
1244         u64 bytenr;
1245         uuid_le uuid;
1246
1247         root = btrfs_alloc_root(fs_info);
1248         if (!root)
1249                 return ERR_PTR(-ENOMEM);
1250
1251         __setup_root(tree_root->nodesize, tree_root->leafsize,
1252                      tree_root->sectorsize, tree_root->stripesize,
1253                      root, fs_info, objectid);
1254         root->root_key.objectid = objectid;
1255         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1256         root->root_key.offset = 0;
1257
1258         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1259                                       0, objectid, NULL, 0, 0, 0);
1260         if (IS_ERR(leaf)) {
1261                 ret = PTR_ERR(leaf);
1262                 leaf = NULL;
1263                 goto fail;
1264         }
1265
1266         bytenr = leaf->start;
1267         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1268         btrfs_set_header_bytenr(leaf, leaf->start);
1269         btrfs_set_header_generation(leaf, trans->transid);
1270         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1271         btrfs_set_header_owner(leaf, objectid);
1272         root->node = leaf;
1273
1274         write_extent_buffer(leaf, fs_info->fsid,
1275                             (unsigned long)btrfs_header_fsid(leaf),
1276                             BTRFS_FSID_SIZE);
1277         write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
1278                             (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1279                             BTRFS_UUID_SIZE);
1280         btrfs_mark_buffer_dirty(leaf);
1281
1282         root->commit_root = btrfs_root_node(root);
1283         root->track_dirty = 1;
1284
1285
1286         root->root_item.flags = 0;
1287         root->root_item.byte_limit = 0;
1288         btrfs_set_root_bytenr(&root->root_item, leaf->start);
1289         btrfs_set_root_generation(&root->root_item, trans->transid);
1290         btrfs_set_root_level(&root->root_item, 0);
1291         btrfs_set_root_refs(&root->root_item, 1);
1292         btrfs_set_root_used(&root->root_item, leaf->len);
1293         btrfs_set_root_last_snapshot(&root->root_item, 0);
1294         btrfs_set_root_dirid(&root->root_item, 0);
1295         uuid_le_gen(&uuid);
1296         memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
1297         root->root_item.drop_level = 0;
1298
1299         key.objectid = objectid;
1300         key.type = BTRFS_ROOT_ITEM_KEY;
1301         key.offset = 0;
1302         ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
1303         if (ret)
1304                 goto fail;
1305
1306         btrfs_tree_unlock(leaf);
1307
1308         return root;
1309
1310 fail:
1311         if (leaf) {
1312                 btrfs_tree_unlock(leaf);
1313                 free_extent_buffer(leaf);
1314         }
1315         kfree(root);
1316
1317         return ERR_PTR(ret);
1318 }
1319
1320 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1321                                          struct btrfs_fs_info *fs_info)
1322 {
1323         struct btrfs_root *root;
1324         struct btrfs_root *tree_root = fs_info->tree_root;
1325         struct extent_buffer *leaf;
1326
1327         root = btrfs_alloc_root(fs_info);
1328         if (!root)
1329                 return ERR_PTR(-ENOMEM);
1330
1331         __setup_root(tree_root->nodesize, tree_root->leafsize,
1332                      tree_root->sectorsize, tree_root->stripesize,
1333                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1334
1335         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1336         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1337         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1338         /*
1339          * log trees do not get reference counted because they go away
1340          * before a real commit is actually done.  They do store pointers
1341          * to file data extents, and those reference counts still get
1342          * updated (along with back refs to the log tree).
1343          */
1344         root->ref_cows = 0;
1345
1346         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1347                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1348                                       0, 0, 0);
1349         if (IS_ERR(leaf)) {
1350                 kfree(root);
1351                 return ERR_CAST(leaf);
1352         }
1353
1354         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1355         btrfs_set_header_bytenr(leaf, leaf->start);
1356         btrfs_set_header_generation(leaf, trans->transid);
1357         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1358         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1359         root->node = leaf;
1360
1361         write_extent_buffer(root->node, root->fs_info->fsid,
1362                             (unsigned long)btrfs_header_fsid(root->node),
1363                             BTRFS_FSID_SIZE);
1364         btrfs_mark_buffer_dirty(root->node);
1365         btrfs_tree_unlock(root->node);
1366         return root;
1367 }
1368
1369 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1370                              struct btrfs_fs_info *fs_info)
1371 {
1372         struct btrfs_root *log_root;
1373
1374         log_root = alloc_log_tree(trans, fs_info);
1375         if (IS_ERR(log_root))
1376                 return PTR_ERR(log_root);
1377         WARN_ON(fs_info->log_root_tree);
1378         fs_info->log_root_tree = log_root;
1379         return 0;
1380 }
1381
1382 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1383                        struct btrfs_root *root)
1384 {
1385         struct btrfs_root *log_root;
1386         struct btrfs_inode_item *inode_item;
1387
1388         log_root = alloc_log_tree(trans, root->fs_info);
1389         if (IS_ERR(log_root))
1390                 return PTR_ERR(log_root);
1391
1392         log_root->last_trans = trans->transid;
1393         log_root->root_key.offset = root->root_key.objectid;
1394
1395         inode_item = &log_root->root_item.inode;
1396         inode_item->generation = cpu_to_le64(1);
1397         inode_item->size = cpu_to_le64(3);
1398         inode_item->nlink = cpu_to_le32(1);
1399         inode_item->nbytes = cpu_to_le64(root->leafsize);
1400         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1401
1402         btrfs_set_root_node(&log_root->root_item, log_root->node);
1403
1404         WARN_ON(root->log_root);
1405         root->log_root = log_root;
1406         root->log_transid = 0;
1407         root->last_log_commit = 0;
1408         return 0;
1409 }
1410
1411 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1412                                                struct btrfs_key *location)
1413 {
1414         struct btrfs_root *root;
1415         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1416         struct btrfs_path *path;
1417         struct extent_buffer *l;
1418         u64 generation;
1419         u32 blocksize;
1420         int ret = 0;
1421         int slot;
1422
1423         root = btrfs_alloc_root(fs_info);
1424         if (!root)
1425                 return ERR_PTR(-ENOMEM);
1426         if (location->offset == (u64)-1) {
1427                 ret = find_and_setup_root(tree_root, fs_info,
1428                                           location->objectid, root);
1429                 if (ret) {
1430                         kfree(root);
1431                         return ERR_PTR(ret);
1432                 }
1433                 goto out;
1434         }
1435
1436         __setup_root(tree_root->nodesize, tree_root->leafsize,
1437                      tree_root->sectorsize, tree_root->stripesize,
1438                      root, fs_info, location->objectid);
1439
1440         path = btrfs_alloc_path();
1441         if (!path) {
1442                 kfree(root);
1443                 return ERR_PTR(-ENOMEM);
1444         }
1445         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1446         if (ret == 0) {
1447                 l = path->nodes[0];
1448                 slot = path->slots[0];
1449                 btrfs_read_root_item(l, slot, &root->root_item);
1450                 memcpy(&root->root_key, location, sizeof(*location));
1451         }
1452         btrfs_free_path(path);
1453         if (ret) {
1454                 kfree(root);
1455                 if (ret > 0)
1456                         ret = -ENOENT;
1457                 return ERR_PTR(ret);
1458         }
1459
1460         generation = btrfs_root_generation(&root->root_item);
1461         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1462         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1463                                      blocksize, generation);
1464         if (!root->node || !extent_buffer_uptodate(root->node)) {
1465                 ret = (!root->node) ? -ENOMEM : -EIO;
1466
1467                 free_extent_buffer(root->node);
1468                 kfree(root);
1469                 return ERR_PTR(ret);
1470         }
1471
1472         root->commit_root = btrfs_root_node(root);
1473         BUG_ON(!root->node); /* -ENOMEM */
1474 out:
1475         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1476                 root->ref_cows = 1;
1477                 btrfs_check_and_init_root_item(&root->root_item);
1478         }
1479
1480         return root;
1481 }
1482
1483 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1484                                               struct btrfs_key *location)
1485 {
1486         struct btrfs_root *root;
1487         int ret;
1488
1489         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1490                 return fs_info->tree_root;
1491         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1492                 return fs_info->extent_root;
1493         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1494                 return fs_info->chunk_root;
1495         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1496                 return fs_info->dev_root;
1497         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1498                 return fs_info->csum_root;
1499         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1500                 return fs_info->quota_root ? fs_info->quota_root :
1501                                              ERR_PTR(-ENOENT);
1502 again:
1503         spin_lock(&fs_info->fs_roots_radix_lock);
1504         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1505                                  (unsigned long)location->objectid);
1506         spin_unlock(&fs_info->fs_roots_radix_lock);
1507         if (root)
1508                 return root;
1509
1510         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1511         if (IS_ERR(root))
1512                 return root;
1513
1514         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1515         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1516                                         GFP_NOFS);
1517         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1518                 ret = -ENOMEM;
1519                 goto fail;
1520         }
1521
1522         btrfs_init_free_ino_ctl(root);
1523         mutex_init(&root->fs_commit_mutex);
1524         spin_lock_init(&root->cache_lock);
1525         init_waitqueue_head(&root->cache_wait);
1526
1527         ret = get_anon_bdev(&root->anon_dev);
1528         if (ret)
1529                 goto fail;
1530
1531         if (btrfs_root_refs(&root->root_item) == 0) {
1532                 ret = -ENOENT;
1533                 goto fail;
1534         }
1535
1536         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1537         if (ret < 0)
1538                 goto fail;
1539         if (ret == 0)
1540                 root->orphan_item_inserted = 1;
1541
1542         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1543         if (ret)
1544                 goto fail;
1545
1546         spin_lock(&fs_info->fs_roots_radix_lock);
1547         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1548                                 (unsigned long)root->root_key.objectid,
1549                                 root);
1550         if (ret == 0)
1551                 root->in_radix = 1;
1552
1553         spin_unlock(&fs_info->fs_roots_radix_lock);
1554         radix_tree_preload_end();
1555         if (ret) {
1556                 if (ret == -EEXIST) {
1557                         free_fs_root(root);
1558                         goto again;
1559                 }
1560                 goto fail;
1561         }
1562
1563         ret = btrfs_find_dead_roots(fs_info->tree_root,
1564                                     root->root_key.objectid);
1565         WARN_ON(ret);
1566         return root;
1567 fail:
1568         free_fs_root(root);
1569         return ERR_PTR(ret);
1570 }
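
/*
 * Illustrative helper, not part of the original file: a minimal sketch
 * of how callers typically use btrfs_read_fs_root_no_name() to look up
 * a subvolume root.  'objectid' is a hypothetical subvolume id picked
 * by the caller; an offset of (u64)-1 selects the latest root item,
 * matching the fs tree lookup done in open_ctree().
 */
static inline struct btrfs_root *
example_lookup_subvol_root(struct btrfs_fs_info *fs_info, u64 objectid)
{
        struct btrfs_key key;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        return btrfs_read_fs_root_no_name(fs_info, &key);
}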
1571
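/*
 * Tell the VM whether any of the underlying devices is congested.
 * Walks the device list under RCU and reports congestion as soon as
 * one backing device is congested for the requested bits.
 */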
1572 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1573 {
1574         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1575         int ret = 0;
1576         struct btrfs_device *device;
1577         struct backing_dev_info *bdi;
1578
1579         rcu_read_lock();
1580         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1581                 if (!device->bdev)
1582                         continue;
1583                 bdi = blk_get_backing_dev_info(device->bdev);
1584                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1585                         ret = 1;
1586                         break;
1587                 }
1588         }
1589         rcu_read_unlock();
1590         return ret;
1591 }
1592
1593 /*
1594  * If this fails, the caller must call bdi_destroy() to get rid of the
1595  * bdi again.
1596  */
1597 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1598 {
1599         int err;
1600
1601         bdi->capabilities = BDI_CAP_MAP_COPY;
1602         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1603         if (err)
1604                 return err;
1605
1606         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1607         bdi->congested_fn       = btrfs_congested_fn;
1608         bdi->congested_data     = info;
1609         return 0;
1610 }
1611
1612 /*
1613  * called by the kthread helper functions to finally call the bio end_io
1614  * functions.  This is where read checksum verification actually happens.
1615  */
1616 static void end_workqueue_fn(struct btrfs_work *work)
1617 {
1618         struct bio *bio;
1619         struct end_io_wq *end_io_wq;
1620         struct btrfs_fs_info *fs_info;
1621         int error;
1622
1623         end_io_wq = container_of(work, struct end_io_wq, work);
1624         bio = end_io_wq->bio;
1625         fs_info = end_io_wq->info;
1626
1627         error = end_io_wq->error;
1628         bio->bi_private = end_io_wq->private;
1629         bio->bi_end_io = end_io_wq->end_io;
1630         kfree(end_io_wq);
1631         bio_endio(bio, error);
1632 }
1633
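/*
 * Background cleaner thread: while the fs is writable and s_umount can
 * be taken shared, it runs delayed iputs, removes one deleted snapshot
 * per pass and kicks off inode defrag, then sleeps until woken.
 */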
1634 static int cleaner_kthread(void *arg)
1635 {
1636         struct btrfs_root *root = arg;
1637
1638         do {
1639                 int again = 0;
1640
1641                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1642                     down_read_trylock(&root->fs_info->sb->s_umount)) {
1643                         if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
1644                                 btrfs_run_delayed_iputs(root);
1645                                 again = btrfs_clean_one_deleted_snapshot(root);
1646                                 mutex_unlock(&root->fs_info->cleaner_mutex);
1647                         }
1648                         btrfs_run_defrag_inodes(root->fs_info);
1649                         up_read(&root->fs_info->sb->s_umount);
1650                 }
1651
1652                 if (!try_to_freeze() && !again) {
1653                         set_current_state(TASK_INTERRUPTIBLE);
1654                         if (!kthread_should_stop())
1655                                 schedule();
1656                         __set_current_state(TASK_RUNNING);
1657                 }
1658         } while (!kthread_should_stop());
1659         return 0;
1660 }
1661
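/*
 * Background transaction thread: wakes up periodically and commits the
 * running transaction once it is blocked or older than 30 seconds;
 * otherwise it just ends its handle and rechecks after a short delay.
 */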
1662 static int transaction_kthread(void *arg)
1663 {
1664         struct btrfs_root *root = arg;
1665         struct btrfs_trans_handle *trans;
1666         struct btrfs_transaction *cur;
1667         u64 transid;
1668         unsigned long now;
1669         unsigned long delay;
1670         bool cannot_commit;
1671
1672         do {
1673                 cannot_commit = false;
1674                 delay = HZ * 30;
1675                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1676
1677                 spin_lock(&root->fs_info->trans_lock);
1678                 cur = root->fs_info->running_transaction;
1679                 if (!cur) {
1680                         spin_unlock(&root->fs_info->trans_lock);
1681                         goto sleep;
1682                 }
1683
1684                 now = get_seconds();
1685                 if (!cur->blocked &&
1686                     (now < cur->start_time || now - cur->start_time < 30)) {
1687                         spin_unlock(&root->fs_info->trans_lock);
1688                         delay = HZ * 5;
1689                         goto sleep;
1690                 }
1691                 transid = cur->transid;
1692                 spin_unlock(&root->fs_info->trans_lock);
1693
1694                 /* If the file system is aborted, this will always fail. */
1695                 trans = btrfs_attach_transaction(root);
1696                 if (IS_ERR(trans)) {
1697                         if (PTR_ERR(trans) != -ENOENT)
1698                                 cannot_commit = true;
1699                         goto sleep;
1700                 }
1701                 if (transid == trans->transid) {
1702                         btrfs_commit_transaction(trans, root);
1703                 } else {
1704                         btrfs_end_transaction(trans, root);
1705                 }
1706 sleep:
1707                 wake_up_process(root->fs_info->cleaner_kthread);
1708                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1709
1710                 if (!try_to_freeze()) {
1711                         set_current_state(TASK_INTERRUPTIBLE);
1712                         if (!kthread_should_stop() &&
1713                             (!btrfs_transaction_blocked(root->fs_info) ||
1714                              cannot_commit))
1715                                 schedule_timeout(delay);
1716                         __set_current_state(TASK_RUNNING);
1717                 }
1718         } while (!kthread_should_stop());
1719         return 0;
1720 }
1721
1722 /*
1723  * this will find the highest generation in the array of
1724  * root backups.  The index of the newest entry is returned,
1725  * or -1 if we can't find anything.
1726  *
1727  * We check to make sure the array is valid by comparing the
1728  * generation of the latest root in the array with the generation
1729  * in the super block.  If they don't match, we discard it.
1730  */
1731 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1732 {
1733         u64 cur;
1734         int newest_index = -1;
1735         struct btrfs_root_backup *root_backup;
1736         int i;
1737
1738         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1739                 root_backup = info->super_copy->super_roots + i;
1740                 cur = btrfs_backup_tree_root_gen(root_backup);
1741                 if (cur == newest_gen)
1742                         newest_index = i;
1743         }
1744
1745         /* check to see if we actually wrapped around */
1746         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1747                 root_backup = info->super_copy->super_roots;
1748                 cur = btrfs_backup_tree_root_gen(root_backup);
1749                 if (cur == newest_gen)
1750                         newest_index = 0;
1751         }
1752         return newest_index;
1753 }
1754
1755
1756 /*
1757  * find the oldest backup so we know where to store new entries
1758  * in the backup array.  This will set the backup_root_index
1759  * field in the fs_info struct.
1760  */
1761 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1762                                      u64 newest_gen)
1763 {
1764         int newest_index = -1;
1765
1766         newest_index = find_newest_super_backup(info, newest_gen);
1767         /* if there was garbage in there, just move along */
1768         if (newest_index == -1) {
1769                 info->backup_root_index = 0;
1770         } else {
1771                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1772         }
1773 }
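
/*
 * Worked example (illustrative, assuming BTRFS_NUM_BACKUP_ROOTS == 4):
 * if the newest backup is found at index 2, the next entry is stored
 * at (2 + 1) % 4 == 3, and the slot after that wraps around to 0,
 * overwriting the oldest backup.
 */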
1774
1775 /*
1776  * copy all the root pointers into the super backup array.
1777  * this will bump the backup pointer by one when it is
1778  * done.
1779  */
1780 static void backup_super_roots(struct btrfs_fs_info *info)
1781 {
1782         int next_backup;
1783         struct btrfs_root_backup *root_backup;
1784         int last_backup;
1785
1786         next_backup = info->backup_root_index;
1787         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1788                 BTRFS_NUM_BACKUP_ROOTS;
1789
1790         /*
1791          * just overwrite the last backup if we're at the same generation;
1792          * this happens only at umount
1793          */
1794         root_backup = info->super_for_commit->super_roots + last_backup;
1795         if (btrfs_backup_tree_root_gen(root_backup) ==
1796             btrfs_header_generation(info->tree_root->node))
1797                 next_backup = last_backup;
1798
1799         root_backup = info->super_for_commit->super_roots + next_backup;
1800
1801         /*
1802          * make sure all of our padding and empty slots get zero filled
1803          * regardless of which ones we use today
1804          */
1805         memset(root_backup, 0, sizeof(*root_backup));
1806
1807         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1808
1809         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1810         btrfs_set_backup_tree_root_gen(root_backup,
1811                                btrfs_header_generation(info->tree_root->node));
1812
1813         btrfs_set_backup_tree_root_level(root_backup,
1814                                btrfs_header_level(info->tree_root->node));
1815
1816         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1817         btrfs_set_backup_chunk_root_gen(root_backup,
1818                                btrfs_header_generation(info->chunk_root->node));
1819         btrfs_set_backup_chunk_root_level(root_backup,
1820                                btrfs_header_level(info->chunk_root->node));
1821
1822         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1823         btrfs_set_backup_extent_root_gen(root_backup,
1824                                btrfs_header_generation(info->extent_root->node));
1825         btrfs_set_backup_extent_root_level(root_backup,
1826                                btrfs_header_level(info->extent_root->node));
1827
1828         /*
1829          * we might commit during log recovery, which happens before we set
1830          * the fs_root.  Make sure it is valid before we fill it in.
1831          */
1832         if (info->fs_root && info->fs_root->node) {
1833                 btrfs_set_backup_fs_root(root_backup,
1834                                          info->fs_root->node->start);
1835                 btrfs_set_backup_fs_root_gen(root_backup,
1836                                btrfs_header_generation(info->fs_root->node));
1837                 btrfs_set_backup_fs_root_level(root_backup,
1838                                btrfs_header_level(info->fs_root->node));
1839         }
1840
1841         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1842         btrfs_set_backup_dev_root_gen(root_backup,
1843                                btrfs_header_generation(info->dev_root->node));
1844         btrfs_set_backup_dev_root_level(root_backup,
1845                                        btrfs_header_level(info->dev_root->node));
1846
1847         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1848         btrfs_set_backup_csum_root_gen(root_backup,
1849                                btrfs_header_generation(info->csum_root->node));
1850         btrfs_set_backup_csum_root_level(root_backup,
1851                                btrfs_header_level(info->csum_root->node));
1852
1853         btrfs_set_backup_total_bytes(root_backup,
1854                              btrfs_super_total_bytes(info->super_copy));
1855         btrfs_set_backup_bytes_used(root_backup,
1856                              btrfs_super_bytes_used(info->super_copy));
1857         btrfs_set_backup_num_devices(root_backup,
1858                              btrfs_super_num_devices(info->super_copy));
1859
1860         /*
1861          * if we don't copy this out to the super_copy, it won't get remembered
1862          * for the next commit
1863          */
1864         memcpy(&info->super_copy->super_roots,
1865                &info->super_for_commit->super_roots,
1866                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1867 }
1868
1869 /*
1870  * this copies info out of the root backup array and back into
1871  * the in-memory super block.  It is meant to help iterate through
1872  * the array, so you send it the number of backups you've already
1873  * tried and the last backup index you used.
1874  *
1875  * this returns -1 when it has tried all the backups.
1876  */
1877 static noinline int next_root_backup(struct btrfs_fs_info *info,
1878                                      struct btrfs_super_block *super,
1879                                      int *num_backups_tried, int *backup_index)
1880 {
1881         struct btrfs_root_backup *root_backup;
1882         int newest = *backup_index;
1883
1884         if (*num_backups_tried == 0) {
1885                 u64 gen = btrfs_super_generation(super);
1886
1887                 newest = find_newest_super_backup(info, gen);
1888                 if (newest == -1)
1889                         return -1;
1890
1891                 *backup_index = newest;
1892                 *num_backups_tried = 1;
1893         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1894                 /* we've tried all the backups, all done */
1895                 return -1;
1896         } else {
1897                 /* jump to the next oldest backup */
1898                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1899                         BTRFS_NUM_BACKUP_ROOTS;
1900                 *backup_index = newest;
1901                 *num_backups_tried += 1;
1902         }
1903         root_backup = super->super_roots + newest;
1904
1905         btrfs_set_super_generation(super,
1906                                    btrfs_backup_tree_root_gen(root_backup));
1907         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1908         btrfs_set_super_root_level(super,
1909                                    btrfs_backup_tree_root_level(root_backup));
1910         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1911
1912         /*
1913          * fixme: the total bytes and num_devices need to match, or we
1914          * should require an fsck
1915          */
1916         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1917         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1918         return 0;
1919 }
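
/*
 * Illustrative sketch of the caller pattern (a sketch only; the real
 * loop lives in open_ctree() via the retry_root_backup label):
 *
 *	int num_backups_tried = 0;
 *	int backup_index = 0;
 *
 *	while (next_root_backup(info, super, &num_backups_tried,
 *				&backup_index) == 0) {
 *		... re-read the tree root this backup points to and
 *		    stop as soon as one of them validates ...
 *	}
 */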
1920
1921 /* helper to cleanup workers */
1922 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1923 {
1924         btrfs_stop_workers(&fs_info->generic_worker);
1925         btrfs_stop_workers(&fs_info->fixup_workers);
1926         btrfs_stop_workers(&fs_info->delalloc_workers);
1927         btrfs_stop_workers(&fs_info->workers);
1928         btrfs_stop_workers(&fs_info->endio_workers);
1929         btrfs_stop_workers(&fs_info->endio_meta_workers);
1930         btrfs_stop_workers(&fs_info->endio_raid56_workers);
1931         btrfs_stop_workers(&fs_info->rmw_workers);
1932         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1933         btrfs_stop_workers(&fs_info->endio_write_workers);
1934         btrfs_stop_workers(&fs_info->endio_freespace_worker);
1935         btrfs_stop_workers(&fs_info->submit_workers);
1936         btrfs_stop_workers(&fs_info->delayed_workers);
1937         btrfs_stop_workers(&fs_info->caching_workers);
1938         btrfs_stop_workers(&fs_info->readahead_workers);
1939         btrfs_stop_workers(&fs_info->flush_workers);
1940         btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
1941 }
1942
1943 /* helper to cleanup tree roots */
1944 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1945 {
1946         free_extent_buffer(info->tree_root->node);
1947         free_extent_buffer(info->tree_root->commit_root);
1948         free_extent_buffer(info->dev_root->node);
1949         free_extent_buffer(info->dev_root->commit_root);
1950         free_extent_buffer(info->extent_root->node);
1951         free_extent_buffer(info->extent_root->commit_root);
1952         free_extent_buffer(info->csum_root->node);
1953         free_extent_buffer(info->csum_root->commit_root);
1954         if (info->quota_root) {
1955                 free_extent_buffer(info->quota_root->node);
1956                 free_extent_buffer(info->quota_root->commit_root);
1957         }
1958
1959         info->tree_root->node = NULL;
1960         info->tree_root->commit_root = NULL;
1961         info->dev_root->node = NULL;
1962         info->dev_root->commit_root = NULL;
1963         info->extent_root->node = NULL;
1964         info->extent_root->commit_root = NULL;
1965         info->csum_root->node = NULL;
1966         info->csum_root->commit_root = NULL;
1967         if (info->quota_root) {
1968                 info->quota_root->node = NULL;
1969                 info->quota_root->commit_root = NULL;
1970         }
1971
1972         if (chunk_root) {
1973                 free_extent_buffer(info->chunk_root->node);
1974                 free_extent_buffer(info->chunk_root->commit_root);
1975                 info->chunk_root->node = NULL;
1976                 info->chunk_root->commit_root = NULL;
1977         }
1978 }
1979
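/*
 * Drop every fs root we still track: first the dead_roots list, then
 * whatever remains in the fs_roots_radix tree.
 */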
1980 static void del_fs_roots(struct btrfs_fs_info *fs_info)
1981 {
1982         int ret;
1983         struct btrfs_root *gang[8];
1984         int i;
1985
1986         while (!list_empty(&fs_info->dead_roots)) {
1987                 gang[0] = list_entry(fs_info->dead_roots.next,
1988                                      struct btrfs_root, root_list);
1989                 list_del(&gang[0]->root_list);
1990
1991                 if (gang[0]->in_radix) {
1992                         btrfs_free_fs_root(fs_info, gang[0]);
1993                 } else {
1994                         free_extent_buffer(gang[0]->node);
1995                         free_extent_buffer(gang[0]->commit_root);
1996                         kfree(gang[0]);
1997                 }
1998         }
1999
2000         while (1) {
2001                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2002                                              (void **)gang, 0,
2003                                              ARRAY_SIZE(gang));
2004                 if (!ret)
2005                         break;
2006                 for (i = 0; i < ret; i++)
2007                         btrfs_free_fs_root(fs_info, gang[i]);
2008         }
2009 }
2010
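/*
 * Main mount-time entry point: read and validate the super block, set
 * up fs_info and the worker pools, read the chunk and tree roots
 * (falling back to the backup roots if needed), start the helper
 * kthreads, replay the log tree and finally look up the fs tree.
 */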
2011 int open_ctree(struct super_block *sb,
2012                struct btrfs_fs_devices *fs_devices,
2013                char *options)
2014 {
2015         u32 sectorsize;
2016         u32 nodesize;
2017         u32 leafsize;
2018         u32 blocksize;
2019         u32 stripesize;
2020         u64 generation;
2021         u64 features;
2022         struct btrfs_key location;
2023         struct buffer_head *bh;
2024         struct btrfs_super_block *disk_super;
2025         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2026         struct btrfs_root *tree_root;
2027         struct btrfs_root *extent_root;
2028         struct btrfs_root *csum_root;
2029         struct btrfs_root *chunk_root;
2030         struct btrfs_root *dev_root;
2031         struct btrfs_root *quota_root;
2032         struct btrfs_root *log_tree_root;
2033         int ret;
2034         int err = -EINVAL;
2035         int num_backups_tried = 0;
2036         int backup_index = 0;
2037
2038         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2039         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
2040         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
2041         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2042         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
2043         quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
2044
2045         if (!tree_root || !extent_root || !csum_root ||
2046             !chunk_root || !dev_root || !quota_root) {
2047                 err = -ENOMEM;
2048                 goto fail;
2049         }
2050
2051         ret = init_srcu_struct(&fs_info->subvol_srcu);
2052         if (ret) {
2053                 err = ret;
2054                 goto fail;
2055         }
2056
2057         ret = setup_bdi(fs_info, &fs_info->bdi);
2058         if (ret) {
2059                 err = ret;
2060                 goto fail_srcu;
2061         }
2062
2063         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2064         if (ret) {
2065                 err = ret;
2066                 goto fail_bdi;
2067         }
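        /*
         * PAGE_CACHE_SIZE is only a placeholder here; this is recomputed
         * from the real leafsize once the super block has been read.
         */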
2068         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2069                                         (1 + ilog2(nr_cpu_ids));
2070
2071         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2072         if (ret) {
2073                 err = ret;
2074                 goto fail_dirty_metadata_bytes;
2075         }
2076
2077         fs_info->btree_inode = new_inode(sb);
2078         if (!fs_info->btree_inode) {
2079                 err = -ENOMEM;
2080                 goto fail_delalloc_bytes;
2081         }
2082
2083         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2084
2085         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2086         INIT_LIST_HEAD(&fs_info->trans_list);
2087         INIT_LIST_HEAD(&fs_info->dead_roots);
2088         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2089         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2090         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2091         spin_lock_init(&fs_info->delalloc_lock);
2092         spin_lock_init(&fs_info->trans_lock);
2093         spin_lock_init(&fs_info->fs_roots_radix_lock);
2094         spin_lock_init(&fs_info->delayed_iput_lock);
2095         spin_lock_init(&fs_info->defrag_inodes_lock);
2096         spin_lock_init(&fs_info->free_chunk_lock);
2097         spin_lock_init(&fs_info->tree_mod_seq_lock);
2098         spin_lock_init(&fs_info->super_lock);
2099         rwlock_init(&fs_info->tree_mod_log_lock);
2100         mutex_init(&fs_info->reloc_mutex);
2101         seqlock_init(&fs_info->profiles_lock);
2102
2103         init_completion(&fs_info->kobj_unregister);
2104         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2105         INIT_LIST_HEAD(&fs_info->space_info);
2106         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2107         btrfs_mapping_init(&fs_info->mapping_tree);
2108         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2109                              BTRFS_BLOCK_RSV_GLOBAL);
2110         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2111                              BTRFS_BLOCK_RSV_DELALLOC);
2112         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2113         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2114         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2115         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2116                              BTRFS_BLOCK_RSV_DELOPS);
2117         atomic_set(&fs_info->nr_async_submits, 0);
2118         atomic_set(&fs_info->async_delalloc_pages, 0);
2119         atomic_set(&fs_info->async_submit_draining, 0);
2120         atomic_set(&fs_info->nr_async_bios, 0);
2121         atomic_set(&fs_info->defrag_running, 0);
2122         atomic64_set(&fs_info->tree_mod_seq, 0);
2123         fs_info->sb = sb;
2124         fs_info->max_inline = 8192 * 1024;
2125         fs_info->metadata_ratio = 0;
2126         fs_info->defrag_inodes = RB_ROOT;
2127         fs_info->trans_no_join = 0;
2128         fs_info->free_chunk_space = 0;
2129         fs_info->tree_mod_log = RB_ROOT;
2130
2131         /* readahead state */
2132         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2133         spin_lock_init(&fs_info->reada_lock);
2134
2135         fs_info->thread_pool_size = min_t(unsigned long,
2136                                           num_online_cpus() + 2, 8);
2137
2138         INIT_LIST_HEAD(&fs_info->ordered_extents);
2139         spin_lock_init(&fs_info->ordered_extent_lock);
2140         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2141                                         GFP_NOFS);
2142         if (!fs_info->delayed_root) {
2143                 err = -ENOMEM;
2144                 goto fail_iput;
2145         }
2146         btrfs_init_delayed_root(fs_info->delayed_root);
2147
2148         mutex_init(&fs_info->scrub_lock);
2149         atomic_set(&fs_info->scrubs_running, 0);
2150         atomic_set(&fs_info->scrub_pause_req, 0);
2151         atomic_set(&fs_info->scrubs_paused, 0);
2152         atomic_set(&fs_info->scrub_cancel_req, 0);
2153         init_waitqueue_head(&fs_info->scrub_pause_wait);
2154         init_rwsem(&fs_info->scrub_super_lock);
2155         fs_info->scrub_workers_refcnt = 0;
2156 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2157         fs_info->check_integrity_print_mask = 0;
2158 #endif
2159
2160         spin_lock_init(&fs_info->balance_lock);
2161         mutex_init(&fs_info->balance_mutex);
2162         atomic_set(&fs_info->balance_running, 0);
2163         atomic_set(&fs_info->balance_pause_req, 0);
2164         atomic_set(&fs_info->balance_cancel_req, 0);
2165         fs_info->balance_ctl = NULL;
2166         init_waitqueue_head(&fs_info->balance_wait_q);
2167
2168         sb->s_blocksize = 4096;
2169         sb->s_blocksize_bits = blksize_bits(4096);
2170         sb->s_bdi = &fs_info->bdi;
2171
2172         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2173         set_nlink(fs_info->btree_inode, 1);
2174         /*
2175          * we set the i_size on the btree inode to the max possible offset.
2176          * the real end of the address space is determined by all of
2177          * the devices in the system
2178          */
2179         fs_info->btree_inode->i_size = OFFSET_MAX;
2180         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2181         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2182
2183         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2184         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2185                              fs_info->btree_inode->i_mapping);
2186         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2187         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2188
2189         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2190
2191         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2192         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2193                sizeof(struct btrfs_key));
2194         set_bit(BTRFS_INODE_DUMMY,
2195                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2196         insert_inode_hash(fs_info->btree_inode);
2197
2198         spin_lock_init(&fs_info->block_group_cache_lock);
2199         fs_info->block_group_cache_tree = RB_ROOT;
2200         fs_info->first_logical_byte = (u64)-1;
2201
2202         extent_io_tree_init(&fs_info->freed_extents[0],
2203                              fs_info->btree_inode->i_mapping);
2204         extent_io_tree_init(&fs_info->freed_extents[1],
2205                              fs_info->btree_inode->i_mapping);
2206         fs_info->pinned_extents = &fs_info->freed_extents[0];
2207         fs_info->do_barriers = 1;
2208
2209
2210         mutex_init(&fs_info->ordered_operations_mutex);
2211         mutex_init(&fs_info->tree_log_mutex);
2212         mutex_init(&fs_info->chunk_mutex);
2213         mutex_init(&fs_info->transaction_kthread_mutex);
2214         mutex_init(&fs_info->cleaner_mutex);
2215         mutex_init(&fs_info->volume_mutex);
2216         init_rwsem(&fs_info->extent_commit_sem);
2217         init_rwsem(&fs_info->cleanup_work_sem);
2218         init_rwsem(&fs_info->subvol_sem);
2219         fs_info->dev_replace.lock_owner = 0;
2220         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2221         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2222         mutex_init(&fs_info->dev_replace.lock_management_lock);
2223         mutex_init(&fs_info->dev_replace.lock);
2224
2225         spin_lock_init(&fs_info->qgroup_lock);
2226         mutex_init(&fs_info->qgroup_ioctl_lock);
2227         fs_info->qgroup_tree = RB_ROOT;
2228         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2229         fs_info->qgroup_seq = 1;
2230         fs_info->quota_enabled = 0;
2231         fs_info->pending_quota_state = 0;
2232         mutex_init(&fs_info->qgroup_rescan_lock);
2233
2234         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2235         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2236
2237         init_waitqueue_head(&fs_info->transaction_throttle);
2238         init_waitqueue_head(&fs_info->transaction_wait);
2239         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2240         init_waitqueue_head(&fs_info->async_submit_wait);
2241
2242         ret = btrfs_alloc_stripe_hash_table(fs_info);
2243         if (ret) {
2244                 err = ret;
2245                 goto fail_alloc;
2246         }
2247
2248         __setup_root(4096, 4096, 4096, 4096, tree_root,
2249                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2250
2251         invalidate_bdev(fs_devices->latest_bdev);
2252         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2253         if (!bh) {
2254                 err = -EINVAL;
2255                 goto fail_alloc;
2256         }
2257
2258         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2259         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2260                sizeof(*fs_info->super_for_commit));
2261         brelse(bh);
2262
2263         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2264
2265         disk_super = fs_info->super_copy;
2266         if (!btrfs_super_root(disk_super))
2267                 goto fail_alloc;
2268
2269         /* check FS state, whether FS is broken. */
2270         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2271                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2272
2273         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2274         if (ret) {
2275                 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2276                 err = ret;
2277                 goto fail_alloc;
2278         }
2279
2280         /*
2281          * run through our array of backup supers and set up
2282          * our ring pointer to the oldest one
2283          */
2284         generation = btrfs_super_generation(disk_super);
2285         find_oldest_super_backup(fs_info, generation);
2286
2287         /*
2288          * In the long term, we'll store the compression type in the super
2289          * block, and it'll be used for per file compression control.
2290          */
2291         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2292
2293         ret = btrfs_parse_options(tree_root, options);
2294         if (ret) {
2295                 err = ret;
2296                 goto fail_alloc;
2297         }
2298
2299         features = btrfs_super_incompat_flags(disk_super) &
2300                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2301         if (features) {
2302                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2303                        "unsupported optional features (%Lx).\n",
2304                        (unsigned long long)features);
2305                 err = -EINVAL;
2306                 goto fail_alloc;
2307         }
2308
2309         if (btrfs_super_leafsize(disk_super) !=
2310             btrfs_super_nodesize(disk_super)) {
2311                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2312                        "blocksizes don't match.  node %d leaf %d\n",
2313                        btrfs_super_nodesize(disk_super),
2314                        btrfs_super_leafsize(disk_super));
2315                 err = -EINVAL;
2316                 goto fail_alloc;
2317         }
2318         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2319                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2320                        "blocksize (%d) was too large\n",
2321                        btrfs_super_leafsize(disk_super));
2322                 err = -EINVAL;
2323                 goto fail_alloc;
2324         }
2325
2326         features = btrfs_super_incompat_flags(disk_super);
2327         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2328         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2329                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2330
2331         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2332                 printk(KERN_INFO "btrfs: has skinny extents\n");
2333
2334         /*
2335          * flag our filesystem as having big metadata blocks if
2336          * they are bigger than the page size
2337          */
2338         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2339                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2340                         printk(KERN_INFO "btrfs: flagging fs with big metadata feature\n");
2341                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2342         }
2343
2344         nodesize = btrfs_super_nodesize(disk_super);
2345         leafsize = btrfs_super_leafsize(disk_super);
2346         sectorsize = btrfs_super_sectorsize(disk_super);
2347         stripesize = btrfs_super_stripesize(disk_super);
2348         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2349         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2350
2351         /*
2352          * mixed block groups end up with duplicate but slightly offset
2353          * extent buffers for the same range.  This leads to corruption.
2354          */
2355         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2356             (sectorsize != leafsize)) {
2357                 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2358                                 "are not allowed for mixed block groups on %s\n",
2359                                 sb->s_id);
2360                 goto fail_alloc;
2361         }
2362
2363         /*
2364          * We needn't take the lock here because there is no other task
2365          * that will update the flag.
2366          */
2367         btrfs_set_super_incompat_flags(disk_super, features);
2368
2369         features = btrfs_super_compat_ro_flags(disk_super) &
2370                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2371         if (!(sb->s_flags & MS_RDONLY) && features) {
2372                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2373                        "unsupported option features (%Lx).\n",
2374                        (unsigned long long)features);
2375                 err = -EINVAL;
2376                 goto fail_alloc;
2377         }
2378
2379         btrfs_init_workers(&fs_info->generic_worker,
2380                            "genwork", 1, NULL);
2381
2382         btrfs_init_workers(&fs_info->workers, "worker",
2383                            fs_info->thread_pool_size,
2384                            &fs_info->generic_worker);
2385
2386         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2387                            fs_info->thread_pool_size,
2388                            &fs_info->generic_worker);
2389
2390         btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
2391                            fs_info->thread_pool_size,
2392                            &fs_info->generic_worker);
2393
2394         btrfs_init_workers(&fs_info->submit_workers, "submit",
2395                            min_t(u64, fs_devices->num_devices,
2396                            fs_info->thread_pool_size),
2397                            &fs_info->generic_worker);
2398
2399         btrfs_init_workers(&fs_info->caching_workers, "cache",
2400                            2, &fs_info->generic_worker);
2401
2402         /* a higher idle thresh on the submit workers makes it much more
2403          * likely that bios will be sent down in a sane order to the
2404          * devices
2405          */
2406         fs_info->submit_workers.idle_thresh = 64;
2407
2408         fs_info->workers.idle_thresh = 16;
2409         fs_info->workers.ordered = 1;
2410
2411         fs_info->delalloc_workers.idle_thresh = 2;
2412         fs_info->delalloc_workers.ordered = 1;
2413
2414         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2415                            &fs_info->generic_worker);
2416         btrfs_init_workers(&fs_info->endio_workers, "endio",
2417                            fs_info->thread_pool_size,
2418                            &fs_info->generic_worker);
2419         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2420                            fs_info->thread_pool_size,
2421                            &fs_info->generic_worker);
2422         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2423                            "endio-meta-write", fs_info->thread_pool_size,
2424                            &fs_info->generic_worker);
2425         btrfs_init_workers(&fs_info->endio_raid56_workers,
2426                            "endio-raid56", fs_info->thread_pool_size,
2427                            &fs_info->generic_worker);
2428         btrfs_init_workers(&fs_info->rmw_workers,
2429                            "rmw", fs_info->thread_pool_size,
2430                            &fs_info->generic_worker);
2431         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2432                            fs_info->thread_pool_size,
2433                            &fs_info->generic_worker);
2434         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2435                            1, &fs_info->generic_worker);
2436         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2437                            fs_info->thread_pool_size,
2438                            &fs_info->generic_worker);
2439         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2440                            fs_info->thread_pool_size,
2441                            &fs_info->generic_worker);
2442         btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
2443                            &fs_info->generic_worker);
2444
2445         /*
2446          * endios are largely parallel and should have a very
2447          * low idle thresh
2448          */
2449         fs_info->endio_workers.idle_thresh = 4;
2450         fs_info->endio_meta_workers.idle_thresh = 4;
2451         fs_info->endio_raid56_workers.idle_thresh = 4;
2452         fs_info->rmw_workers.idle_thresh = 2;
2453
2454         fs_info->endio_write_workers.idle_thresh = 2;
2455         fs_info->endio_meta_write_workers.idle_thresh = 2;
2456         fs_info->readahead_workers.idle_thresh = 2;
2457
2458         /*
2459          * btrfs_start_workers can really only fail because of ENOMEM so just
2460          * return -ENOMEM if any of these fail.
2461          */
2462         ret = btrfs_start_workers(&fs_info->workers);
2463         ret |= btrfs_start_workers(&fs_info->generic_worker);
2464         ret |= btrfs_start_workers(&fs_info->submit_workers);
2465         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2466         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2467         ret |= btrfs_start_workers(&fs_info->endio_workers);
2468         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2469         ret |= btrfs_start_workers(&fs_info->rmw_workers);
2470         ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
2471         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2472         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2473         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2474         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2475         ret |= btrfs_start_workers(&fs_info->caching_workers);
2476         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2477         ret |= btrfs_start_workers(&fs_info->flush_workers);
2478         ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
2479         if (ret) {
2480                 err = -ENOMEM;
2481                 goto fail_sb_buffer;
2482         }
2483
2484         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2485         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2486                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2487
2488         tree_root->nodesize = nodesize;
2489         tree_root->leafsize = leafsize;
2490         tree_root->sectorsize = sectorsize;
2491         tree_root->stripesize = stripesize;
2492
2493         sb->s_blocksize = sectorsize;
2494         sb->s_blocksize_bits = blksize_bits(sectorsize);
2495
2496         if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2497                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2498                 goto fail_sb_buffer;
2499         }
2500
2501         if (sectorsize != PAGE_SIZE) {
2502                 printk(KERN_WARNING "btrfs: Incompatible sector size (%lu) "
2503                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2504                 goto fail_sb_buffer;
2505         }
2506
2507         mutex_lock(&fs_info->chunk_mutex);
2508         ret = btrfs_read_sys_array(tree_root);
2509         mutex_unlock(&fs_info->chunk_mutex);
2510         if (ret) {
2511                 printk(KERN_WARNING "btrfs: failed to read the system "
2512                        "array on %s\n", sb->s_id);
2513                 goto fail_sb_buffer;
2514         }
2515
2516         blocksize = btrfs_level_size(tree_root,
2517                                      btrfs_super_chunk_root_level(disk_super));
2518         generation = btrfs_super_chunk_root_generation(disk_super);
2519
2520         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2521                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2522
2523         chunk_root->node = read_tree_block(chunk_root,
2524                                            btrfs_super_chunk_root(disk_super),
2525                                            blocksize, generation);
2526         if (!chunk_root->node ||
2527             !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2528                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2529                        sb->s_id);
2530                 goto fail_tree_roots;
2531         }
2532         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2533         chunk_root->commit_root = btrfs_root_node(chunk_root);
2534
2535         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2536            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2537            BTRFS_UUID_SIZE);
2538
2539         ret = btrfs_read_chunk_tree(chunk_root);
2540         if (ret) {
2541                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2542                        sb->s_id);
2543                 goto fail_tree_roots;
2544         }
2545
2546         /*
2547          * keep the device that is marked to be the target device for the
2548          * dev_replace procedure
2549          */
2550         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2551
2552         if (!fs_devices->latest_bdev) {
2553                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2554                        sb->s_id);
2555                 goto fail_tree_roots;
2556         }
2557
2558 retry_root_backup:
2559         blocksize = btrfs_level_size(tree_root,
2560                                      btrfs_super_root_level(disk_super));
2561         generation = btrfs_super_generation(disk_super);
2562
2563         tree_root->node = read_tree_block(tree_root,
2564                                           btrfs_super_root(disk_super),
2565                                           blocksize, generation);
2566         if (!tree_root->node ||
2567             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2568                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2569                        sb->s_id);
2570
2571                 goto recovery_tree_root;
2572         }
2573
2574         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2575         tree_root->commit_root = btrfs_root_node(tree_root);
2576
2577         ret = find_and_setup_root(tree_root, fs_info,
2578                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2579         if (ret)
2580                 goto recovery_tree_root;
2581         extent_root->track_dirty = 1;
2582
2583         ret = find_and_setup_root(tree_root, fs_info,
2584                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2585         if (ret)
2586                 goto recovery_tree_root;
2587         dev_root->track_dirty = 1;
2588
2589         ret = find_and_setup_root(tree_root, fs_info,
2590                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2591         if (ret)
2592                 goto recovery_tree_root;
2593         csum_root->track_dirty = 1;
2594
2595         ret = find_and_setup_root(tree_root, fs_info,
2596                                   BTRFS_QUOTA_TREE_OBJECTID, quota_root);
2597         if (ret) {
2598                 kfree(quota_root);
2599                 quota_root = fs_info->quota_root = NULL;
2600         } else {
2601                 quota_root->track_dirty = 1;
2602                 fs_info->quota_enabled = 1;
2603                 fs_info->pending_quota_state = 1;
2604         }
2605
2606         fs_info->generation = generation;
2607         fs_info->last_trans_committed = generation;
2608
2609         ret = btrfs_recover_balance(fs_info);
2610         if (ret) {
2611                 printk(KERN_WARNING "btrfs: failed to recover balance\n");
2612                 goto fail_block_groups;
2613         }
2614
2615         ret = btrfs_init_dev_stats(fs_info);
2616         if (ret) {
2617                 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2618                        ret);
2619                 goto fail_block_groups;
2620         }
2621
2622         ret = btrfs_init_dev_replace(fs_info);
2623         if (ret) {
2624                 pr_err("btrfs: failed to init dev_replace: %d\n", ret);
2625                 goto fail_block_groups;
2626         }
2627
2628         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2629
2630         ret = btrfs_init_space_info(fs_info);
2631         if (ret) {
2632                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2633                 goto fail_block_groups;
2634         }
2635
2636         ret = btrfs_read_block_groups(extent_root);
2637         if (ret) {
2638                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2639                 goto fail_block_groups;
2640         }
2641         fs_info->num_tolerated_disk_barrier_failures =
2642                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2643         if (fs_info->fs_devices->missing_devices >
2644              fs_info->num_tolerated_disk_barrier_failures &&
2645             !(sb->s_flags & MS_RDONLY)) {
2646                 printk(KERN_WARNING
2647                        "Btrfs: too many missing devices, writeable mount is not allowed\n");
2648                 goto fail_block_groups;
2649         }
2650
2651         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2652                                                "btrfs-cleaner");
2653         if (IS_ERR(fs_info->cleaner_kthread))
2654                 goto fail_block_groups;
2655
2656         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2657                                                    tree_root,
2658                                                    "btrfs-transaction");
2659         if (IS_ERR(fs_info->transaction_kthread))
2660                 goto fail_cleaner;
2661
2662         if (!btrfs_test_opt(tree_root, SSD) &&
2663             !btrfs_test_opt(tree_root, NOSSD) &&
2664             !fs_info->fs_devices->rotating) {
2665                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2666                        "mode\n");
2667                 btrfs_set_opt(fs_info->mount_opt, SSD);
2668         }
2669
2670 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2671         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2672                 ret = btrfsic_mount(tree_root, fs_devices,
2673                                     btrfs_test_opt(tree_root,
2674                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2675                                     1 : 0,
2676                                     fs_info->check_integrity_print_mask);
2677                 if (ret)
2678                         printk(KERN_WARNING "btrfs: failed to initialize"
2679                                " integrity check module %s\n", sb->s_id);
2680         }
2681 #endif
2682         ret = btrfs_read_qgroup_config(fs_info);
2683         if (ret)
2684                 goto fail_trans_kthread;
2685
2686         /* do not make disk changes in broken FS */
2687         if (btrfs_super_log_root(disk_super) != 0) {
2688                 u64 bytenr = btrfs_super_log_root(disk_super);
2689
2690                 if (fs_devices->rw_devices == 0) {
2691                         printk(KERN_WARNING "Btrfs log replay required "
2692                                "on RO media\n");
2693                         err = -EIO;
2694                         goto fail_qgroup;
2695                 }
2696                 blocksize =
2697                      btrfs_level_size(tree_root,
2698                                       btrfs_super_log_root_level(disk_super));
2699
2700                 log_tree_root = btrfs_alloc_root(fs_info);
2701                 if (!log_tree_root) {
2702                         err = -ENOMEM;
2703                         goto fail_qgroup;
2704                 }
2705
2706                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2707                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2708
2709                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2710                                                       blocksize,
2711                                                       generation + 1);
2712                 if (!log_tree_root->node ||
2713                     !extent_buffer_uptodate(log_tree_root->node)) {
2714                         printk(KERN_ERR "btrfs: failed to read log tree\n");
2715                         free_extent_buffer(log_tree_root->node);
2716                         kfree(log_tree_root);
2717                         goto fail_trans_kthread;
2718                 }
2719                 /* returns with log_tree_root freed on success */
2720                 ret = btrfs_recover_log_trees(log_tree_root);
2721                 if (ret) {
2722                         btrfs_error(tree_root->fs_info, ret,
2723                                     "Failed to recover log tree");
2724                         free_extent_buffer(log_tree_root->node);
2725                         kfree(log_tree_root);
2726                         goto fail_trans_kthread;
2727                 }
2728
2729                 if (sb->s_flags & MS_RDONLY) {
2730                         ret = btrfs_commit_super(tree_root);
2731                         if (ret)
2732                                 goto fail_trans_kthread;
2733                 }
2734         }
2735
2736         ret = btrfs_find_orphan_roots(tree_root);
2737         if (ret)
2738                 goto fail_trans_kthread;
2739
2740         if (!(sb->s_flags & MS_RDONLY)) {
2741                 ret = btrfs_cleanup_fs_roots(fs_info);
2742                 if (ret)
2743                         goto fail_trans_kthread;
2744
2745                 ret = btrfs_recover_relocation(tree_root);
2746                 if (ret < 0) {
2747                         printk(KERN_WARNING
2748                                "btrfs: failed to recover relocation\n");
2749                         err = -EINVAL;
2750                         goto fail_qgroup;
2751                 }
2752         }
2753
2754         location.objectid = BTRFS_FS_TREE_OBJECTID;
2755         location.type = BTRFS_ROOT_ITEM_KEY;
2756         location.offset = (u64)-1;
2757
2758         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2759         if (!fs_info->fs_root)
2760                 goto fail_qgroup;
2761         if (IS_ERR(fs_info->fs_root)) {
2762                 err = PTR_ERR(fs_info->fs_root);
2763                 goto fail_qgroup;
2764         }
2765
2766         if (sb->s_flags & MS_RDONLY)
2767                 return 0;
2768
2769         down_read(&fs_info->cleanup_work_sem);
2770         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2771             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2772                 up_read(&fs_info->cleanup_work_sem);
2773                 close_ctree(tree_root);
2774                 return ret;
2775         }
2776         up_read(&fs_info->cleanup_work_sem);
2777
2778         ret = btrfs_resume_balance_async(fs_info);
2779         if (ret) {
2780                 printk(KERN_WARNING "btrfs: failed to resume balance\n");
2781                 close_ctree(tree_root);
2782                 return ret;
2783         }
2784
2785         ret = btrfs_resume_dev_replace_async(fs_info);
2786         if (ret) {
2787                 pr_warn("btrfs: failed to resume dev_replace\n");
2788                 close_ctree(tree_root);
2789                 return ret;
2790         }
2791
2792         return 0;
2793
2794 fail_qgroup:
2795         btrfs_free_qgroup_config(fs_info);
2796 fail_trans_kthread:
2797         kthread_stop(fs_info->transaction_kthread);
2798         del_fs_roots(fs_info);
2799         btrfs_cleanup_transaction(fs_info->tree_root);
2800 fail_cleaner:
2801         kthread_stop(fs_info->cleaner_kthread);
2802
2803         /*
2804          * make sure we're done with the btree inode before we stop our
2805          * kthreads
2806          */
2807         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2808
2809 fail_block_groups:
2810         btrfs_put_block_group_cache(fs_info);
2811         btrfs_free_block_groups(fs_info);
2812
2813 fail_tree_roots:
2814         free_root_pointers(fs_info, 1);
2815         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2816
2817 fail_sb_buffer:
2818         btrfs_stop_all_workers(fs_info);
2819 fail_alloc:
2820 fail_iput:
2821         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2822
2823         iput(fs_info->btree_inode);
2824 fail_delalloc_bytes:
2825         percpu_counter_destroy(&fs_info->delalloc_bytes);
2826 fail_dirty_metadata_bytes:
2827         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2828 fail_bdi:
2829         bdi_destroy(&fs_info->bdi);
2830 fail_srcu:
2831         cleanup_srcu_struct(&fs_info->subvol_srcu);
2832 fail:
2833         btrfs_free_stripe_hash_table(fs_info);
2834         btrfs_close_devices(fs_info->fs_devices);
2835         return err;
2836
2837 recovery_tree_root:
2838         if (!btrfs_test_opt(tree_root, RECOVERY))
2839                 goto fail_tree_roots;
2840
2841         free_root_pointers(fs_info, 0);
2842
2843         /* don't use the log in recovery mode, it won't be valid */
2844         btrfs_set_super_log_root(disk_super, 0);
2845
2846         /* we can't trust the free space cache either */
2847         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2848
2849         ret = next_root_backup(fs_info, fs_info->super_copy,
2850                                &num_backups_tried, &backup_index);
2851         if (ret == -1)
2852                 goto fail_block_groups;
2853         goto retry_root_backup;
2854 }
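
/*
 * Illustrative note on the fail_* labels above: they form a strict
 * unwind ladder, so each step that can fail jumps to the label that
 * tears down exactly what was set up before it.  For example, a
 * cleaner kthread failure unwinds via fail_block_groups, while a
 * transaction kthread failure first stops the cleaner via
 * fail_cleaner.
 */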
2855
2856 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2857 {
2858         if (uptodate) {
2859                 set_buffer_uptodate(bh);
2860         } else {
2861                 struct btrfs_device *device = (struct btrfs_device *)
2862                         bh->b_private;
2863
2864                 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2865                                           "I/O error on %s\n",
2866                                           rcu_str_deref(device->name));
2867                 /* note, we don't set_buffer_write_io_error because we have
2868                  * our own ways of dealing with the IO errors
2869                  */
2870                 clear_buffer_uptodate(bh);
2871                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2872         }
2873         unlock_buffer(bh);
2874         put_bh(bh);
2875 }
2876
2877 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2878 {
2879         struct buffer_head *bh;
2880         struct buffer_head *latest = NULL;
2881         struct btrfs_super_block *super;
2882         int i;
2883         u64 transid = 0;
2884         u64 bytenr;
2885
2886         /* we would like to check all the supers, but that would make
2887          * a btrfs mount succeed after a mkfs from a different FS.
2888          * So, we need to add a special mount option to scan for
2889          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2890          */
2891         for (i = 0; i < 1; i++) {
2892                 bytenr = btrfs_sb_offset(i);
2893                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2894                         break;
2895                 bh = __bread(bdev, bytenr / 4096, 4096);
2896                 if (!bh)
2897                         continue;
2898
2899                 super = (struct btrfs_super_block *)bh->b_data;
2900                 if (btrfs_super_bytenr(super) != bytenr ||
2901                     super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2902                         brelse(bh);
2903                         continue;
2904                 }
2905
2906                 if (!latest || btrfs_super_generation(super) > transid) {
2907                         brelse(latest);
2908                         latest = bh;
2909                         transid = btrfs_super_generation(super);
2910                 } else {
2911                         brelse(bh);
2912                 }
2913         }
2914         return latest;
2915 }
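
/*
 * Illustrative sketch (not compiled in) of how a caller can use
 * btrfs_read_dev_super() above; the returned buffer head holds the
 * super copy with the highest generation, and the caller owns the
 * reference:
 *
 *	struct buffer_head *bh = btrfs_read_dev_super(bdev);
 *	if (bh) {
 *		struct btrfs_super_block *super;
 *
 *		super = (struct btrfs_super_block *)bh->b_data;
 *		printk(KERN_INFO "btrfs: latest super generation %llu\n",
 *		       (unsigned long long)btrfs_super_generation(super));
 *		brelse(bh);
 *	}
 */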
2916
2917 /*
2918  * this should be called twice, once with wait == 0 and
2919  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2920  * we write are pinned.
2921  *
2922  * They are released when wait == 1 is done.
2923  * max_mirrors must be the same for both runs, and it indicates how
2924  * many supers on this one device should be written.
2925  *
2926  * max_mirrors == 0 means to write them all.
2927  */
2928 static int write_dev_supers(struct btrfs_device *device,
2929                             struct btrfs_super_block *sb,
2930                             int do_barriers, int wait, int max_mirrors)
2931 {
2932         struct buffer_head *bh;
2933         int i;
2934         int ret;
2935         int errors = 0;
2936         u32 crc;
2937         u64 bytenr;
2938
2939         if (max_mirrors == 0)
2940                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2941
2942         for (i = 0; i < max_mirrors; i++) {
2943                 bytenr = btrfs_sb_offset(i);
2944                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2945                         break;
2946
2947                 if (wait) {
2948                         bh = __find_get_block(device->bdev, bytenr / 4096,
2949                                               BTRFS_SUPER_INFO_SIZE);
2950                         if (!bh) {
2951                                 errors++;
2952                                 continue;
2953                         }
2954                         wait_on_buffer(bh);
2955                         if (!buffer_uptodate(bh))
2956                                 errors++;
2957
2958                         /* drop our reference */
2959                         brelse(bh);
2960
2961                         /* drop the reference from the wait == 0 run */
2962                         brelse(bh);
2963                         continue;
2964                 } else {
2965                         btrfs_set_super_bytenr(sb, bytenr);
2966
2967                         crc = ~(u32)0;
2968                         crc = btrfs_csum_data((char *)sb +
2969                                               BTRFS_CSUM_SIZE, crc,
2970                                               BTRFS_SUPER_INFO_SIZE -
2971                                               BTRFS_CSUM_SIZE);
2972                         btrfs_csum_final(crc, sb->csum);
2973
2974                         /*
2975                          * one reference for us; we leave it pinned until
2976                          * the wait == 1 pass drops it
2977                          */
2978                         bh = __getblk(device->bdev, bytenr / 4096,
2979                                       BTRFS_SUPER_INFO_SIZE);
2980                         if (!bh) {
2981                                 printk(KERN_ERR "btrfs: couldn't get super "
2982                                        "buffer head for bytenr %Lu\n", bytenr);
2983                                 errors++;
2984                                 continue;
2985                         }
2986
2987                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2988
2989                         /* one reference for submit_bh */
2990                         get_bh(bh);
2991
2992                         set_buffer_uptodate(bh);
2993                         lock_buffer(bh);
2994                         bh->b_end_io = btrfs_end_buffer_write_sync;
2995                         bh->b_private = device;
2996                 }
2997
2998                 /*
2999                  * every super copy is submitted with FUA so it reaches
3000                  * stable storage before the write is reported complete
3001                  */
3002                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
3003                 if (ret)
3004                         errors++;
3005         }
3006         return errors < i ? 0 : -1;
3007 }
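
/*
 * A minimal sketch of the two-pass protocol described above (the real
 * caller is write_all_supers() below): pass wait == 0 to checksum,
 * pin and submit the buffers, then wait == 1 to wait for completion
 * and unpin.  max_mirrors must match between the two calls:
 *
 *	ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	...
 *	ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 *
 * The wait == 0 run leaves an extra reference on each buffer head,
 * which is why the wait == 1 run does a double brelse().
 */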
3008
3009 /*
3010  * endio for write_dev_flush; this will wake anyone waiting
3011  * for the barrier when it is done
3012  */
3013 static void btrfs_end_empty_barrier(struct bio *bio, int err)
3014 {
3015         if (err) {
3016                 if (err == -EOPNOTSUPP)
3017                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3018                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3019         }
3020         if (bio->bi_private)
3021                 complete(bio->bi_private);
3022         bio_put(bio);
3023 }
3024
3025 /*
3026  * trigger flushes for one of the devices.  If you pass wait == 0, the flush is
3027  * sent down.  With wait == 1, it waits for the previous flush.
3028  *
3029  * any device where the flush fails with EOPNOTSUPP is flagged as not
3030  * barrier-capable
3031  */
3032 static int write_dev_flush(struct btrfs_device *device, int wait)
3033 {
3034         struct bio *bio;
3035         int ret = 0;
3036
3037         if (device->nobarriers)
3038                 return 0;
3039
3040         if (wait) {
3041                 bio = device->flush_bio;
3042                 if (!bio)
3043                         return 0;
3044
3045                 wait_for_completion(&device->flush_wait);
3046
3047                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3048                         printk_in_rcu("btrfs: disabling barriers on dev %s\n",
3049                                       rcu_str_deref(device->name));
3050                         device->nobarriers = 1;
3051                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3052                         ret = -EIO;
3053                         btrfs_dev_stat_inc_and_print(device,
3054                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3055                 }
3056
3057                 /* drop the reference from the wait == 0 run */
3058                 bio_put(bio);
3059                 device->flush_bio = NULL;
3060
3061                 return ret;
3062         }
3063
3064         /*
3065          * one reference for us; it stays pinned in device->flush_bio
3066          * until the wait == 1 run drops it
3067          */
3068         device->flush_bio = NULL;
3069         bio = bio_alloc(GFP_NOFS, 0);
3070         if (!bio)
3071                 return -ENOMEM;
3072
3073         bio->bi_end_io = btrfs_end_empty_barrier;
3074         bio->bi_bdev = device->bdev;
3075         init_completion(&device->flush_wait);
3076         bio->bi_private = &device->flush_wait;
3077         device->flush_bio = bio;
3078
3079         bio_get(bio);
3080         btrfsic_submit_bio(WRITE_FLUSH, bio);
3081
3082         return 0;
3083 }
3084
3085 /*
3086  * send an empty flush down to each device in parallel,
3087  * then wait for them
3088  */
3089 static int barrier_all_devices(struct btrfs_fs_info *info)
3090 {
3091         struct list_head *head;
3092         struct btrfs_device *dev;
3093         int errors_send = 0;
3094         int errors_wait = 0;
3095         int ret;
3096
3097         /* send down all the barriers */
3098         head = &info->fs_devices->devices;
3099         list_for_each_entry_rcu(dev, head, dev_list) {
3100                 if (!dev->bdev) {
3101                         errors_send++;
3102                         continue;
3103                 }
3104                 if (!dev->in_fs_metadata || !dev->writeable)
3105                         continue;
3106
3107                 ret = write_dev_flush(dev, 0);
3108                 if (ret)
3109                         errors_send++;
3110         }
3111
3112         /* wait for all the barriers */
3113         list_for_each_entry_rcu(dev, head, dev_list) {
3114                 if (!dev->bdev) {
3115                         errors_wait++;
3116                         continue;
3117                 }
3118                 if (!dev->in_fs_metadata || !dev->writeable)
3119                         continue;
3120
3121                 ret = write_dev_flush(dev, 1);
3122                 if (ret)
3123                         errors_wait++;
3124         }
3125         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3126             errors_wait > info->num_tolerated_disk_barrier_failures)
3127                 return -EIO;
3128         return 0;
3129 }
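
/*
 * Worked example (illustrative): with RAID1 metadata,
 * num_tolerated_disk_barrier_failures is 1, so one device failing its
 * flush in either pass above is tolerated and the commit proceeds; a
 * second failing device makes barrier_all_devices() return -EIO and
 * the superblock write-out is aborted.
 */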
3130
3131 int btrfs_calc_num_tolerated_disk_barrier_failures(
3132         struct btrfs_fs_info *fs_info)
3133 {
3134         struct btrfs_ioctl_space_info space;
3135         struct btrfs_space_info *sinfo;
3136         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3137                        BTRFS_BLOCK_GROUP_SYSTEM,
3138                        BTRFS_BLOCK_GROUP_METADATA,
3139                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3140         int num_types = 4;
3141         int i;
3142         int c;
3143         int num_tolerated_disk_barrier_failures =
3144                 (int)fs_info->fs_devices->num_devices;
3145
3146         for (i = 0; i < num_types; i++) {
3147                 struct btrfs_space_info *tmp;
3148
3149                 sinfo = NULL;
3150                 rcu_read_lock();
3151                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3152                         if (tmp->flags == types[i]) {
3153                                 sinfo = tmp;
3154                                 break;
3155                         }
3156                 }
3157                 rcu_read_unlock();
3158
3159                 if (!sinfo)
3160                         continue;
3161
3162                 down_read(&sinfo->groups_sem);
3163                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3164                         if (!list_empty(&sinfo->block_groups[c])) {
3165                                 u64 flags;
3166
3167                                 btrfs_get_block_group_info(
3168                                         &sinfo->block_groups[c], &space);
3169                                 if (space.total_bytes == 0 ||
3170                                     space.used_bytes == 0)
3171                                         continue;
3172                                 flags = space.flags;
3173                                 /*
3174                                  * return
3175                                  * 0: if dup, single or RAID0 is configured for
3176                                  *    any of metadata, system or data, else
3177                                  * 1: if RAID5 is configured, or if RAID1 or
3178                                  *    RAID10 is configured and only two mirrors
3179                                  *    are used, else
3180                                  * 2: if RAID6 is configured, else
3181                                  * num_mirrors - 1: if RAID1 or RAID10 is
3182                                  *                  configured and more than
3183                                  *                  2 mirrors are used.
3184                                  */
3185                                 if (num_tolerated_disk_barrier_failures > 0 &&
3186                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3187                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3188                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3189                                       == 0)))
3190                                         num_tolerated_disk_barrier_failures = 0;
3191                                 else if (num_tolerated_disk_barrier_failures > 1) {
3192                                         if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3193                                             BTRFS_BLOCK_GROUP_RAID5 |
3194                                             BTRFS_BLOCK_GROUP_RAID10)) {
3195                                                 num_tolerated_disk_barrier_failures = 1;
3196                                         } else if (flags &
3197                                                    BTRFS_BLOCK_GROUP_RAID6) {
3198                                                 num_tolerated_disk_barrier_failures = 2;
3199                                         }
3200                                 }
3201                         }
3202                 }
3203                 up_read(&sinfo->groups_sem);
3204         }
3205
3206         return num_tolerated_disk_barrier_failures;
3207 }
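
/*
 * Worked example (illustrative) of the rules in the comment above: a
 * filesystem with RAID1 metadata and system chunks but RAID0 data
 * gets num_tolerated_disk_barrier_failures == 0, because losing any
 * device loses data block groups; the same filesystem with RAID1
 * data as well tolerates 1 failure, and RAID6 everywhere tolerates 2.
 */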
3208
3209 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3210 {
3211         struct list_head *head;
3212         struct btrfs_device *dev;
3213         struct btrfs_super_block *sb;
3214         struct btrfs_dev_item *dev_item;
3215         int ret;
3216         int do_barriers;
3217         int max_errors;
3218         int total_errors = 0;
3219         u64 flags;
3220
3221         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3222         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3223         backup_super_roots(root->fs_info);
3224
3225         sb = root->fs_info->super_for_commit;
3226         dev_item = &sb->dev_item;
3227
3228         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3229         head = &root->fs_info->fs_devices->devices;
3230
3231         if (do_barriers) {
3232                 ret = barrier_all_devices(root->fs_info);
3233                 if (ret) {
3234                         mutex_unlock(
3235                                 &root->fs_info->fs_devices->device_list_mutex);
3236                         btrfs_error(root->fs_info, ret,
3237                                     "errors while submitting device barriers.");
3238                         return ret;
3239                 }
3240         }
3241
3242         list_for_each_entry_rcu(dev, head, dev_list) {
3243                 if (!dev->bdev) {
3244                         total_errors++;
3245                         continue;
3246                 }
3247                 if (!dev->in_fs_metadata || !dev->writeable)
3248                         continue;
3249
3250                 btrfs_set_stack_device_generation(dev_item, 0);
3251                 btrfs_set_stack_device_type(dev_item, dev->type);
3252                 btrfs_set_stack_device_id(dev_item, dev->devid);
3253                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3254                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3255                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3256                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3257                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3258                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3259                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3260
3261                 flags = btrfs_super_flags(sb);
3262                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3263
3264                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3265                 if (ret)
3266                         total_errors++;
3267         }
3268         if (total_errors > max_errors) {
3269                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3270                        total_errors);
3271
3272                 /* This shouldn't happen. FUA is masked off if unsupported */
3273                 BUG();
3274         }
3275
3276         total_errors = 0;
3277         list_for_each_entry_rcu(dev, head, dev_list) {
3278                 if (!dev->bdev)
3279                         continue;
3280                 if (!dev->in_fs_metadata || !dev->writeable)
3281                         continue;
3282
3283                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3284                 if (ret)
3285                         total_errors++;
3286         }
3287         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3288         if (total_errors > max_errors) {
3289                 btrfs_error(root->fs_info, -EIO,
3290                             "%d errors while writing supers", total_errors);
3291                 return -EIO;
3292         }
3293         return 0;
3294 }
3295
3296 int write_ctree_super(struct btrfs_trans_handle *trans,
3297                       struct btrfs_root *root, int max_mirrors)
3298 {
3299         return write_all_supers(root, max_mirrors);
3303 }
3304
3305 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3306 {
3307         spin_lock(&fs_info->fs_roots_radix_lock);
3308         radix_tree_delete(&fs_info->fs_roots_radix,
3309                           (unsigned long)root->root_key.objectid);
3310         spin_unlock(&fs_info->fs_roots_radix_lock);
3311
3312         if (btrfs_root_refs(&root->root_item) == 0)
3313                 synchronize_srcu(&fs_info->subvol_srcu);
3314
3315         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3316                 btrfs_free_log(NULL, root);
3317                 btrfs_free_log_root_tree(NULL, fs_info);
3318         }
3319
3320         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3321         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3322         free_fs_root(root);
3323 }
3324
3325 static void free_fs_root(struct btrfs_root *root)
3326 {
3327         iput(root->cache_inode);
3328         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3329         if (root->anon_dev)
3330                 free_anon_bdev(root->anon_dev);
3331         free_extent_buffer(root->node);
3332         free_extent_buffer(root->commit_root);
3333         kfree(root->free_ino_ctl);
3334         kfree(root->free_ino_pinned);
3335         kfree(root->name);
3336         kfree(root);
3337 }
3338
3339 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3340 {
3341         u64 root_objectid = 0;
3342         struct btrfs_root *gang[8];
3343         int i;
3344         int ret;
3345
3346         while (1) {
3347                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3348                                              (void **)gang, root_objectid,
3349                                              ARRAY_SIZE(gang));
3350                 if (!ret)
3351                         break;
3352
3353                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3354                 for (i = 0; i < ret; i++) {
3355                         int err;
3356
3357                         root_objectid = gang[i]->root_key.objectid;
3358                         err = btrfs_orphan_cleanup(gang[i]);
3359                         if (err)
3360                                 return err;
3361                 }
3362                 root_objectid++;
3363         }
3364         return 0;
3365 }
3366
3367 int btrfs_commit_super(struct btrfs_root *root)
3368 {
3369         struct btrfs_trans_handle *trans;
3370         int ret;
3371
3372         mutex_lock(&root->fs_info->cleaner_mutex);
3373         btrfs_run_delayed_iputs(root);
3374         mutex_unlock(&root->fs_info->cleaner_mutex);
3375         wake_up_process(root->fs_info->cleaner_kthread);
3376
3377         /* wait until ongoing cleanup work is done */
3378         down_write(&root->fs_info->cleanup_work_sem);
3379         up_write(&root->fs_info->cleanup_work_sem);
3380
3381         trans = btrfs_join_transaction(root);
3382         if (IS_ERR(trans))
3383                 return PTR_ERR(trans);
3384         ret = btrfs_commit_transaction(trans, root);
3385         if (ret)
3386                 return ret;
3387         /* run commit again to drop the original snapshot */
3388         trans = btrfs_join_transaction(root);
3389         if (IS_ERR(trans))
3390                 return PTR_ERR(trans);
3391         ret = btrfs_commit_transaction(trans, root);
3392         if (ret)
3393                 return ret;
3394         ret = btrfs_write_and_wait_transaction(NULL, root);
3395         if (ret) {
3396                 btrfs_error(root->fs_info, ret,
3397                             "Failed to sync btree inode to disk.");
3398                 return ret;
3399         }
3400
3401         ret = write_ctree_super(NULL, root, 0);
3402         return ret;
3403 }
3404
3405 int close_ctree(struct btrfs_root *root)
3406 {
3407         struct btrfs_fs_info *fs_info = root->fs_info;
3408         int ret;
3409
3410         fs_info->closing = 1;
3411         smp_mb();
3412
3413         /* pause restriper - we want to resume on mount */
3414         btrfs_pause_balance(fs_info);
3415
3416         btrfs_dev_replace_suspend_for_unmount(fs_info);
3417
3418         btrfs_scrub_cancel(fs_info);
3419
3420         /* wait for any defraggers to finish */
3421         wait_event(fs_info->transaction_wait,
3422                    (atomic_read(&fs_info->defrag_running) == 0));
3423
3424         /* clear out the rbtree of defraggable inodes */
3425         btrfs_cleanup_defrag_inodes(fs_info);
3426
3427         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3428                 ret = btrfs_commit_super(root);
3429                 if (ret)
3430                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3431         }
3432
3433         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3434                 btrfs_error_commit_super(root);
3435
3436         btrfs_put_block_group_cache(fs_info);
3437
3438         kthread_stop(fs_info->transaction_kthread);
3439         kthread_stop(fs_info->cleaner_kthread);
3440
3441         fs_info->closing = 2;
3442         smp_mb();
3443
3444         btrfs_free_qgroup_config(root->fs_info);
3445
3446         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3447                 printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3448                        percpu_counter_sum(&fs_info->delalloc_bytes));
3449         }
3450
3451         free_root_pointers(fs_info, 1);
3452
3453         btrfs_free_block_groups(fs_info);
3454
3455         del_fs_roots(fs_info);
3456
3457         iput(fs_info->btree_inode);
3458
3459         btrfs_stop_all_workers(fs_info);
3460
3461 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3462         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3463                 btrfsic_unmount(root, fs_info->fs_devices);
3464 #endif
3465
3466         btrfs_close_devices(fs_info->fs_devices);
3467         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3468
3469         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3470         percpu_counter_destroy(&fs_info->delalloc_bytes);
3471         bdi_destroy(&fs_info->bdi);
3472         cleanup_srcu_struct(&fs_info->subvol_srcu);
3473
3474         btrfs_free_stripe_hash_table(fs_info);
3475
3476         return 0;
3477 }
3478
3479 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3480                           int atomic)
3481 {
3482         int ret;
3483         struct inode *btree_inode = buf->pages[0]->mapping->host;
3484
3485         ret = extent_buffer_uptodate(buf);
3486         if (!ret)
3487                 return ret;
3488
3489         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3490                                     parent_transid, atomic);
3491         if (ret == -EAGAIN)
3492                 return ret;
3493         return !ret;
3494 }
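
/*
 * Return convention for btrfs_buffer_uptodate() above (illustrative):
 * 1 means uptodate with a matching parent transid, 0 means a mismatch
 * or a stale buffer, and -EAGAIN is passed through when atomic == 1
 * and the check could not be done without blocking, e.g.:
 *
 *	ret = btrfs_buffer_uptodate(buf, parent_transid, 1);
 *	if (ret == -EAGAIN)
 *		ret = btrfs_buffer_uptodate(buf, parent_transid, 0);
 */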
3495
3496 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3497 {
3498         return set_extent_buffer_uptodate(buf);
3499 }
3500
3501 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3502 {
3503         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3504         u64 transid = btrfs_header_generation(buf);
3505         int was_dirty;
3506
3507         btrfs_assert_tree_locked(buf);
3508         if (transid != root->fs_info->generation)
3509                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3510                        "found %llu running %llu\n",
3511                         (unsigned long long)buf->start,
3512                         (unsigned long long)transid,
3513                         (unsigned long long)root->fs_info->generation);
3514         was_dirty = set_extent_buffer_dirty(buf);
3515         if (!was_dirty)
3516                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3517                                      buf->len,
3518                                      root->fs_info->dirty_metadata_batch);
3519 }
3520
3521 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3522                                         int flush_delayed)
3523 {
3524         /*
3525          * looks as though older kernels can get into trouble with
3526          * this code, they end up stuck in balance_dirty_pages forever
3527          */
3528         int ret;
3529
3530         if (current->flags & PF_MEMALLOC)
3531                 return;
3532
3533         if (flush_delayed)
3534                 btrfs_balance_delayed_items(root);
3535
3536         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3537                                      BTRFS_DIRTY_METADATA_THRESH);
3538         if (ret > 0) {
3539                 balance_dirty_pages_ratelimited(
3540                                    root->fs_info->btree_inode->i_mapping);
3541         }
3542         return;
3543 }
3544
3545 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3546 {
3547         __btrfs_btree_balance_dirty(root, 1);
3548 }
3549
3550 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3551 {
3552         __btrfs_btree_balance_dirty(root, 0);
3553 }
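
/*
 * Typical usage (illustrative; modify_some_tree_blocks is a made-up
 * placeholder): loops that dirty a lot of btree metadata call
 * btrfs_btree_balance_dirty() once per iteration so writeback can
 * keep up; paths that must not recurse into the delayed-item code
 * use the _nodelay variant:
 *
 *	while (more_work) {
 *		modify_some_tree_blocks(root);
 *		btrfs_btree_balance_dirty(root);
 *	}
 */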
3554
3555 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3556 {
3557         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3558         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3559 }
3560
3561 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3562                               int read_only)
3563 {
3564         if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
3565                 printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
3566                 return -EINVAL;
3567         }
3568
3569         if (read_only)
3570                 return 0;
3571
3572         return 0;
3573 }
3574
3575 static void btrfs_error_commit_super(struct btrfs_root *root)
3576 {
3577         mutex_lock(&root->fs_info->cleaner_mutex);
3578         btrfs_run_delayed_iputs(root);
3579         mutex_unlock(&root->fs_info->cleaner_mutex);
3580
3581         down_write(&root->fs_info->cleanup_work_sem);
3582         up_write(&root->fs_info->cleanup_work_sem);
3583
3584         /* cleanup FS via transaction */
3585         btrfs_cleanup_transaction(root);
3586 }
3587
3588 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3589                                              struct btrfs_root *root)
3590 {
3591         struct btrfs_inode *btrfs_inode;
3592         struct list_head splice;
3593
3594         INIT_LIST_HEAD(&splice);
3595
3596         mutex_lock(&root->fs_info->ordered_operations_mutex);
3597         spin_lock(&root->fs_info->ordered_extent_lock);
3598
3599         list_splice_init(&t->ordered_operations, &splice);
3600         while (!list_empty(&splice)) {
3601                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3602                                          ordered_operations);
3603
3604                 list_del_init(&btrfs_inode->ordered_operations);
3605
3606                 btrfs_invalidate_inodes(btrfs_inode->root);
3607         }
3608
3609         spin_unlock(&root->fs_info->ordered_extent_lock);
3610         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3611 }
3612
3613 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3614 {
3615         struct btrfs_ordered_extent *ordered;
3616
3617         spin_lock(&root->fs_info->ordered_extent_lock);
3618         /*
3619          * This will just short circuit the ordered completion stuff which will
3620          * make sure the ordered extent gets properly cleaned up.
3621          */
3622         list_for_each_entry(ordered, &root->fs_info->ordered_extents,
3623                             root_extent_list)
3624                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3625         spin_unlock(&root->fs_info->ordered_extent_lock);
3626 }
3627
3628 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3629                                struct btrfs_root *root)
3630 {
3631         struct rb_node *node;
3632         struct btrfs_delayed_ref_root *delayed_refs;
3633         struct btrfs_delayed_ref_node *ref;
3634         int ret = 0;
3635
3636         delayed_refs = &trans->delayed_refs;
3637
3638         spin_lock(&delayed_refs->lock);
3639         if (delayed_refs->num_entries == 0) {
3640                 spin_unlock(&delayed_refs->lock);
3641                 printk(KERN_INFO "btrfs: delayed_refs has no entries\n");
3642                 return ret;
3643         }
3644
3645         while ((node = rb_first(&delayed_refs->root)) != NULL) {
3646                 struct btrfs_delayed_ref_head *head = NULL;
3647
3648                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3649                 atomic_set(&ref->refs, 1);
3650                 if (btrfs_delayed_ref_is_head(ref)) {
3651
3652                         head = btrfs_delayed_node_to_head(ref);
3653                         if (!mutex_trylock(&head->mutex)) {
3654                                 atomic_inc(&ref->refs);
3655                                 spin_unlock(&delayed_refs->lock);
3656
3657                                 /* Need to wait for the delayed ref to run */
3658                                 mutex_lock(&head->mutex);
3659                                 mutex_unlock(&head->mutex);
3660                                 btrfs_put_delayed_ref(ref);
3661
3662                                 spin_lock(&delayed_refs->lock);
3663                                 continue;
3664                         }
3665
3666                         if (head->must_insert_reserved)
3667                                 btrfs_pin_extent(root, ref->bytenr,
3668                                                  ref->num_bytes, 1);
3669                         btrfs_free_delayed_extent_op(head->extent_op);
3670                         delayed_refs->num_heads--;
3671                         if (list_empty(&head->cluster))
3672                                 delayed_refs->num_heads_ready--;
3673                         list_del_init(&head->cluster);
3674                 }
3675
3676                 ref->in_tree = 0;
3677                 rb_erase(&ref->rb_node, &delayed_refs->root);
3678                 delayed_refs->num_entries--;
3679                 if (head)
3680                         mutex_unlock(&head->mutex);
3681                 spin_unlock(&delayed_refs->lock);
3682                 btrfs_put_delayed_ref(ref);
3683
3684                 cond_resched();
3685                 spin_lock(&delayed_refs->lock);
3686         }
3687
3688         spin_unlock(&delayed_refs->lock);
3689
3690         return ret;
3691 }
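
/*
 * Illustrative note: the mutex_trylock() dance above is the usual way
 * to wait out a ref head that is currently being processed; locking
 * and immediately unlocking head->mutex after a failed trylock simply
 * blocks until whoever holds the head has finished running it.
 */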
3692
3693 static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
3694 {
3695         struct btrfs_pending_snapshot *snapshot;
3696         struct list_head splice;
3697
3698         INIT_LIST_HEAD(&splice);
3699
3700         list_splice_init(&t->pending_snapshots, &splice);
3701
3702         while (!list_empty(&splice)) {
3703                 snapshot = list_entry(splice.next,
3704                                       struct btrfs_pending_snapshot,
3705                                       list);
3706                 snapshot->error = -ECANCELED;
3707                 list_del_init(&snapshot->list);
3708         }
3709 }
3710
3711 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3712 {
3713         struct btrfs_inode *btrfs_inode;
3714         struct list_head splice;
3715
3716         INIT_LIST_HEAD(&splice);
3717
3718         spin_lock(&root->fs_info->delalloc_lock);
3719         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3720
3721         while (!list_empty(&splice)) {
3722                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3723                                     delalloc_inodes);
3724
3725                 list_del_init(&btrfs_inode->delalloc_inodes);
3726                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3727                           &btrfs_inode->runtime_flags);
3728
3729                 btrfs_invalidate_inodes(btrfs_inode->root);
3730         }
3731
3732         spin_unlock(&root->fs_info->delalloc_lock);
3733 }
3734
3735 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3736                                         struct extent_io_tree *dirty_pages,
3737                                         int mark)
3738 {
3739         int ret;
3740         struct extent_buffer *eb;
3741         u64 start = 0;
3742         u64 end;
3743
3744         while (1) {
3745                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3746                                             mark, NULL);
3747                 if (ret)
3748                         break;
3749
3750                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3751                 while (start <= end) {
3752                         eb = btrfs_find_tree_block(root, start,
3753                                                    root->leafsize);
3754                         start += root->leafsize;
3755                         if (!eb)
3756                                 continue;
3757                         wait_on_extent_buffer_writeback(eb);
3758
3759                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3760                                                &eb->bflags))
3761                                 clear_extent_buffer_dirty(eb);
3762                         free_extent_buffer_stale(eb);
3763                 }
3764         }
3765
3766         return ret;
3767 }
3768
3769 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3770                                        struct extent_io_tree *pinned_extents)
3771 {
3772         struct extent_io_tree *unpin;
3773         u64 start;
3774         u64 end;
3775         int ret;
3776         bool loop = true;
3777
3778         unpin = pinned_extents;
3779 again:
3780         while (1) {
3781                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3782                                             EXTENT_DIRTY, NULL);
3783                 if (ret)
3784                         break;
3785
3786                 /* if mounted with -o discard, trim the range first */
3787                 if (btrfs_test_opt(root, DISCARD))
3788                         ret = btrfs_error_discard_extent(root, start,
3789                                                          end + 1 - start,
3790                                                          NULL);
3791
3792                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3793                 btrfs_error_unpin_extent_range(root, start, end);
3794                 cond_resched();
3795         }
3796
3797         if (loop) {
3798                 if (unpin == &root->fs_info->freed_extents[0])
3799                         unpin = &root->fs_info->freed_extents[1];
3800                 else
3801                         unpin = &root->fs_info->freed_extents[0];
3802                 loop = false;
3803                 goto again;
3804         }
3805
3806         return 0;
3807 }
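
/*
 * Illustrative note: the function above deliberately runs its loop
 * twice.  Pinned extents live in fs_info->freed_extents[0] and [1],
 * which are swapped between transactions, so both trees have to be
 * drained when a transaction is torn down.
 */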
3808
3809 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3810                                    struct btrfs_root *root)
3811 {
3812         btrfs_destroy_delayed_refs(cur_trans, root);
3813         btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3814                                 cur_trans->dirty_pages.dirty_bytes);
3815
3816         /* FIXME: cleanup wait for commit */
3817         cur_trans->in_commit = 1;
3818         cur_trans->blocked = 1;
3819         wake_up(&root->fs_info->transaction_blocked_wait);
3820
3821         btrfs_evict_pending_snapshots(cur_trans);
3822
3823         cur_trans->blocked = 0;
3824         wake_up(&root->fs_info->transaction_wait);
3825
3826         cur_trans->commit_done = 1;
3827         wake_up(&cur_trans->commit_wait);
3828
3829         btrfs_destroy_delayed_inodes(root);
3830         btrfs_assert_delayed_root_empty(root);
3831
3832         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3833                                      EXTENT_DIRTY);
3834         btrfs_destroy_pinned_extent(root,
3835                                     root->fs_info->pinned_extents);
3836
3837         /*
3838         memset(cur_trans, 0, sizeof(*cur_trans));
3839         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3840         */
3841 }
3842
3843 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3844 {
3845         struct btrfs_transaction *t;
3846         LIST_HEAD(list);
3847
3848         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3849
3850         spin_lock(&root->fs_info->trans_lock);
3851         list_splice_init(&root->fs_info->trans_list, &list);
3852         root->fs_info->trans_no_join = 1;
3853         spin_unlock(&root->fs_info->trans_lock);
3854
3855         while (!list_empty(&list)) {
3856                 t = list_entry(list.next, struct btrfs_transaction, list);
3857
3858                 btrfs_destroy_ordered_operations(t, root);
3859
3860                 btrfs_destroy_ordered_extents(root);
3861
3862                 btrfs_destroy_delayed_refs(t, root);
3863
3864                 /* FIXME: cleanup wait for commit */
3865                 t->in_commit = 1;
3866                 t->blocked = 1;
3867                 smp_mb();
3868                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3869                         wake_up(&root->fs_info->transaction_blocked_wait);
3870
3871                 btrfs_evict_pending_snapshots(t);
3872
3873                 t->blocked = 0;
3874                 smp_mb();
3875                 if (waitqueue_active(&root->fs_info->transaction_wait))
3876                         wake_up(&root->fs_info->transaction_wait);
3877
3878                 t->commit_done = 1;
3879                 smp_mb();
3880                 if (waitqueue_active(&t->commit_wait))
3881                         wake_up(&t->commit_wait);
3882
3883                 btrfs_destroy_delayed_inodes(root);
3884                 btrfs_assert_delayed_root_empty(root);
3885
3886                 btrfs_destroy_delalloc_inodes(root);
3887
3888                 spin_lock(&root->fs_info->trans_lock);
3889                 root->fs_info->running_transaction = NULL;
3890                 spin_unlock(&root->fs_info->trans_lock);
3891
3892                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3893                                              EXTENT_DIRTY);
3894
3895                 btrfs_destroy_pinned_extent(root,
3896                                             root->fs_info->pinned_extents);
3897
3898                 atomic_set(&t->use_count, 0);
3899                 list_del_init(&t->list);
3900                 memset(t, 0, sizeof(*t));
3901                 kmem_cache_free(btrfs_transaction_cachep, t);
3902         }
3903
3904         spin_lock(&root->fs_info->trans_lock);
3905         root->fs_info->trans_no_join = 0;
3906         spin_unlock(&root->fs_info->trans_lock);
3907         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3908
3909         return 0;
3910 }
3911
3912 static struct extent_io_ops btree_extent_io_ops = {
3913         .readpage_end_io_hook = btree_readpage_end_io_hook,
3914         .readpage_io_failed_hook = btree_io_failed_hook,
3915         .submit_bio_hook = btree_submit_bio_hook,
3916         /* note we're sharing with inode.c for the merge bio hook */
3917         .merge_bio_hook = btrfs_merge_bio_hook,
3918 };