Btrfs: ensure an entire eb is written at once
[linux-2.6-block.git] / fs / btrfs / disk-io.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
34 #include "compat.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46 #include "check-integrity.h"
47
48 static struct extent_io_ops btree_extent_io_ops;
49 static void end_workqueue_fn(struct btrfs_work *work);
50 static void free_fs_root(struct btrfs_root *root);
51 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
52                                     int read_only);
53 static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
54 static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
55 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
56                                       struct btrfs_root *root);
57 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
58 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
59 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
60                                         struct extent_io_tree *dirty_pages,
61                                         int mark);
62 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
63                                        struct extent_io_tree *pinned_extents);
64 static int btrfs_cleanup_transaction(struct btrfs_root *root);
65
66 /*
67  * end_io_wq structs are used to do processing in task context when an IO is
68  * complete.  This is used during reads to verify checksums, and it is used
69  * by writes to insert metadata for new file extents after IO is complete.
70  */
71 struct end_io_wq {
72         struct bio *bio;
73         bio_end_io_t *end_io;
74         void *private;
75         struct btrfs_fs_info *info;
76         int error;
77         int metadata;
78         struct list_head list;
79         struct btrfs_work work;
80 };
81
82 /*
83  * async submit bios are used to offload expensive checksumming
84  * onto the worker threads.  They checksum file and metadata bios
85  * just before they are sent down the IO stack.
86  */
87 struct async_submit_bio {
88         struct inode *inode;
89         struct bio *bio;
90         struct list_head list;
91         extent_submit_bio_hook_t *submit_bio_start;
92         extent_submit_bio_hook_t *submit_bio_done;
93         int rw;
94         int mirror_num;
95         unsigned long bio_flags;
96         /*
97          * bio_offset is optional and can be used if the pages in the bio
98          * can't tell us where in the file the bio should go
99          */
100         u64 bio_offset;
101         struct btrfs_work work;
102 };
103
104 /*
105  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
106  * eb, the lockdep key is determined by the btrfs_root it belongs to and
107  * the level the eb occupies in the tree.
108  *
109  * Different roots are used for different purposes and may nest inside each
110  * other, so they require separate keysets.  As lockdep keys should be
111  * static, assign keysets according to the purpose of the root as indicated
112  * by btrfs_root->objectid.  This ensures that all special purpose roots
113  * have separate keysets.
114  *
115  * Lock-nesting across peer nodes is always done with the immediate parent
116  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
117  * subclass to avoid triggering lockdep warning in such cases.
118  *
119  * The key is set by the readpage_end_io_hook after the buffer has passed
120  * csum validation but before the pages are unlocked.  It is also set by
121  * btrfs_init_new_buffer on freshly allocated blocks.
122  *
123  * We also add a check to make sure the highest level of the tree is the
124  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
125  * needs update as well.
126  */
127 #ifdef CONFIG_DEBUG_LOCK_ALLOC
128 # if BTRFS_MAX_LEVEL != 8
129 #  error
130 # endif
131
132 static struct btrfs_lockdep_keyset {
133         u64                     id;             /* root objectid */
134         const char              *name_stem;     /* lock name stem */
135         char                    names[BTRFS_MAX_LEVEL + 1][20];
136         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
137 } btrfs_lockdep_keysets[] = {
138         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
139         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
140         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
141         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
142         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
143         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
144         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
145         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
146         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
147         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
148         { .id = 0,                              .name_stem = "tree"     },
149 };
150
151 void __init btrfs_init_lockdep(void)
152 {
153         int i, j;
154
155         /* initialize lockdep class names */
156         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
157                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
158
159                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
160                         snprintf(ks->names[j], sizeof(ks->names[j]),
161                                  "btrfs-%s-%02d", ks->name_stem, j);
162         }
163 }
164
165 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
166                                     int level)
167 {
168         struct btrfs_lockdep_keyset *ks;
169
170         BUG_ON(level >= ARRAY_SIZE(ks->keys));
171
172         /* find the matching keyset, id 0 is the default entry */
173         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
174                 if (ks->id == objectid)
175                         break;
176
177         lockdep_set_class_and_name(&eb->lock,
178                                    &ks->keys[level], ks->names[level]);
179 }
180
181 #endif
182
183 /*
184  * extents on the btree inode are pretty simple; there's one extent
185  * that covers the entire device
186  */
187 static struct extent_map *btree_get_extent(struct inode *inode,
188                 struct page *page, size_t pg_offset, u64 start, u64 len,
189                 int create)
190 {
191         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
192         struct extent_map *em;
193         int ret;
194
195         read_lock(&em_tree->lock);
196         em = lookup_extent_mapping(em_tree, start, len);
197         if (em) {
198                 em->bdev =
199                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
200                 read_unlock(&em_tree->lock);
201                 goto out;
202         }
203         read_unlock(&em_tree->lock);
204
205         em = alloc_extent_map();
206         if (!em) {
207                 em = ERR_PTR(-ENOMEM);
208                 goto out;
209         }
210         em->start = 0;
211         em->len = (u64)-1;
212         em->block_len = (u64)-1;
213         em->block_start = 0;
214         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
215
216         write_lock(&em_tree->lock);
217         ret = add_extent_mapping(em_tree, em);
218         if (ret == -EEXIST) {
219                 u64 failed_start = em->start;
220                 u64 failed_len = em->len;
221
222                 free_extent_map(em);
223                 em = lookup_extent_mapping(em_tree, start, len);
224                 if (em) {
225                         ret = 0;
226                 } else {
227                         em = lookup_extent_mapping(em_tree, failed_start,
228                                                    failed_len);
229                         ret = -EIO;
230                 }
231         } else if (ret) {
232                 free_extent_map(em);
233                 em = NULL;
234         }
235         write_unlock(&em_tree->lock);
236
237         if (ret)
238                 em = ERR_PTR(ret);
239 out:
240         return em;
241 }
242
243 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
244 {
245         return crc32c(seed, data, len);
246 }
247
248 void btrfs_csum_final(u32 crc, char *result)
249 {
250         put_unaligned_le32(~crc, result);
251 }
252
253 /*
254  * compute the csum for a btree block, and either verify it or write it
255  * into the csum field of the block.
256  */
257 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
258                            int verify)
259 {
260         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
261         char *result = NULL;
262         unsigned long len;
263         unsigned long cur_len;
264         unsigned long offset = BTRFS_CSUM_SIZE;
265         char *kaddr;
266         unsigned long map_start;
267         unsigned long map_len;
268         int err;
269         u32 crc = ~(u32)0;
270         unsigned long inline_result;
271
272         len = buf->len - offset;
273         while (len > 0) {
274                 err = map_private_extent_buffer(buf, offset, 32,
275                                         &kaddr, &map_start, &map_len);
276                 if (err)
277                         return 1;
278                 cur_len = min(len, map_len - (offset - map_start));
279                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
280                                       crc, cur_len);
281                 len -= cur_len;
282                 offset += cur_len;
283         }
284         if (csum_size > sizeof(inline_result)) {
285                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
286                 if (!result)
287                         return 1;
288         } else {
289                 result = (char *)&inline_result;
290         }
291
292         btrfs_csum_final(crc, result);
293
294         if (verify) {
295                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
296                         u32 val;
297                         u32 found = 0;
298                         memcpy(&found, result, csum_size);
299
300                         read_extent_buffer(buf, &val, 0, csum_size);
301                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
302                                        "failed on %llu wanted %X found %X "
303                                        "level %d\n",
304                                        root->fs_info->sb->s_id,
305                                        (unsigned long long)buf->start, val, found,
306                                        btrfs_header_level(buf));
307                         if (result != (char *)&inline_result)
308                                 kfree(result);
309                         return 1;
310                 }
311         } else {
312                 write_extent_buffer(buf, result, 0, csum_size);
313         }
314         if (result != (char *)&inline_result)
315                 kfree(result);
316         return 0;
317 }
318
319 /*
320  * we can't consider a given block up to date unless the transid of the
321  * block matches the transid in the parent node's pointer.  This is how we
322  * detect blocks that either didn't get written at all or got written
323  * in the wrong place.
324  */
325 static int verify_parent_transid(struct extent_io_tree *io_tree,
326                                  struct extent_buffer *eb, u64 parent_transid)
327 {
328         struct extent_state *cached_state = NULL;
329         int ret;
330
331         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
332                 return 0;
333
334         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
335                          0, &cached_state, GFP_NOFS);
336         if (extent_buffer_uptodate(eb) &&
337             btrfs_header_generation(eb) == parent_transid) {
338                 ret = 0;
339                 goto out;
340         }
341         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
342                        "found %llu\n",
343                        (unsigned long long)eb->start,
344                        (unsigned long long)parent_transid,
345                        (unsigned long long)btrfs_header_generation(eb));
346         ret = 1;
347         clear_extent_buffer_uptodate(eb);
348 out:
349         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
350                              &cached_state, GFP_NOFS);
351         return ret;
352 }
353
354 /*
355  * helper to read a given tree block, doing retries as required when
356  * the checksums don't match and we have alternate mirrors to try.
357  */
358 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
359                                           struct extent_buffer *eb,
360                                           u64 start, u64 parent_transid)
361 {
362         struct extent_io_tree *io_tree;
363         int ret;
364         int num_copies = 0;
365         int mirror_num = 0;
366
367         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
368         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
369         while (1) {
370                 ret = read_extent_buffer_pages(io_tree, eb, start,
371                                                WAIT_COMPLETE,
372                                                btree_get_extent, mirror_num);
373                 if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
374                         return ret;
375
376                 /*
377                  * This buffer's crc is fine, but its contents are corrupted, so
378                  * there is no reason to read the other copies; they won't be
379                  * any less wrong.
380                  */
381                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
382                         return ret;
383
384                 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
385                                               eb->start, eb->len);
386                 if (num_copies == 1)
387                         return ret;
388
389                 mirror_num++;
390                 if (mirror_num > num_copies)
391                         return ret;
392         }
393         return -EIO;
394 }
395
396 /*
397  * checksum a dirty tree block before IO.  This has extra checks to make sure
398  * we only fill in the checksum field in the first page of a multi-page block
399  */
400
401 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
402 {
403         struct extent_io_tree *tree;
404         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
405         u64 found_start;
406         struct extent_buffer *eb;
407
408         tree = &BTRFS_I(page->mapping->host)->io_tree;
409
410         eb = (struct extent_buffer *)page->private;
411         if (page != eb->pages[0])
412                 return 0;
413
414         found_start = btrfs_header_bytenr(eb);
415         if (found_start != start) {
416                 WARN_ON(1);
417                 return 0;
418         }
419         if (eb->pages[0] != page) {
420                 WARN_ON(1);
421                 return 0;
422         }
423         if (!PageUptodate(page)) {
424                 WARN_ON(1);
425                 return 0;
426         }
427         csum_tree_block(root, eb, 0);
428         return 0;
429 }
430
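/*
 * Return 0 if the fsid in the tree block header matches the fsid of one of
 * this filesystem's devices (including any seed devices), 1 otherwise.
 */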
431 static int check_tree_block_fsid(struct btrfs_root *root,
432                                  struct extent_buffer *eb)
433 {
434         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
435         u8 fsid[BTRFS_UUID_SIZE];
436         int ret = 1;
437
438         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
439                            BTRFS_FSID_SIZE);
440         while (fs_devices) {
441                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
442                         ret = 0;
443                         break;
444                 }
445                 fs_devices = fs_devices->seed;
446         }
447         return ret;
448 }
449
450 #define CORRUPT(reason, eb, root, slot)                         \
451         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
452                "root=%llu, slot=%d\n", reason,                  \
453                (unsigned long long)btrfs_header_bytenr(eb),     \
454                (unsigned long long)root->objectid, slot)
455
456 static noinline int check_leaf(struct btrfs_root *root,
457                                struct extent_buffer *leaf)
458 {
459         struct btrfs_key key;
460         struct btrfs_key leaf_key;
461         u32 nritems = btrfs_header_nritems(leaf);
462         int slot;
463
464         if (nritems == 0)
465                 return 0;
466
467         /* Check the 0 item */
468         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
469             BTRFS_LEAF_DATA_SIZE(root)) {
470                 CORRUPT("invalid item offset size pair", leaf, root, 0);
471                 return -EIO;
472         }
473
474         /*
475          * Check to make sure each item's keys are in the correct order and their
476          * offsets make sense.  We only have to loop through nritems-1 because
477          * we check the current slot against the next slot, which verifies the
478          * next slot's offset+size makes sense and that the current slot's
479          * offset is correct.
480          */
481         for (slot = 0; slot < nritems - 1; slot++) {
482                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
483                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
484
485                 /* Make sure the keys are in the right order */
486                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
487                         CORRUPT("bad key order", leaf, root, slot);
488                         return -EIO;
489                 }
490
491                 /*
492                  * Make sure the offset and ends are right, remember that the
493                  * item data starts at the end of the leaf and grows towards the
494                  * front.
495                  */
496                 if (btrfs_item_offset_nr(leaf, slot) !=
497                         btrfs_item_end_nr(leaf, slot + 1)) {
498                         CORRUPT("slot offset bad", leaf, root, slot);
499                         return -EIO;
500                 }
501
502                 /*
503                  * Check to make sure that we don't point outside of the leaf,
504          * just in case all the items are consistent with each other, but
505                  * all point outside of the leaf.
506                  */
507                 if (btrfs_item_end_nr(leaf, slot) >
508                     BTRFS_LEAF_DATA_SIZE(root)) {
509                         CORRUPT("slot end outside of leaf", leaf, root, slot);
510                         return -EIO;
511                 }
512         }
513
514         return 0;
515 }
516
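/*
 * Walk backwards from the page's file offset, at most max_walk bytes, looking
 * for an extent buffer that covers this page.  Returns the buffer with an
 * extra reference held, or NULL if none is found.
 */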
517 struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
518                                        struct page *page, int max_walk)
519 {
520         struct extent_buffer *eb;
521         u64 start = page_offset(page);
522         u64 target = start;
523         u64 min_start;
524
525         if (start < max_walk)
526                 min_start = 0;
527         else
528                 min_start = start - max_walk;
529
530         while (start >= min_start) {
531                 eb = find_extent_buffer(tree, start, 0);
532                 if (eb) {
533                         /*
534                          * we found an extent buffer and it contains our page,
535                          * hooray!
536                          */
537                         if (eb->start <= target &&
538                             eb->start + eb->len > target)
539                                 return eb;
540
541                         /* we found an extent buffer that wasn't for us */
542                         free_extent_buffer(eb);
543                         return NULL;
544                 }
545                 if (start == 0)
546                         break;
547                 start -= PAGE_CACHE_SIZE;
548         }
549         return NULL;
550 }
551
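/*
 * read completion hook for btree pages.  Once the last page of the eb has
 * finished IO, verify the header bytenr, the fsid, the checksum and (for
 * leaves) the item layout before marking the buffer uptodate.
 */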
552 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
553                                struct extent_state *state)
554 {
555         struct extent_io_tree *tree;
556         u64 found_start;
557         int found_level;
558         struct extent_buffer *eb;
559         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
560         int ret = 0;
561         int reads_done;
562
563         if (!page->private)
564                 goto out;
565
566         tree = &BTRFS_I(page->mapping->host)->io_tree;
567         eb = (struct extent_buffer *)page->private;
568
569         /* the pending IO might have been the only thing that kept this buffer
570          * in memory.  Make sure we have a ref for all the other checks
571          */
572         extent_buffer_get(eb);
573
574         reads_done = atomic_dec_and_test(&eb->io_pages);
575         if (!reads_done)
576                 goto err;
577
578         found_start = btrfs_header_bytenr(eb);
579         if (found_start != eb->start) {
580                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
581                                "%llu %llu\n",
582                                (unsigned long long)found_start,
583                                (unsigned long long)eb->start);
584                 ret = -EIO;
585                 goto err;
586         }
587         if (check_tree_block_fsid(root, eb)) {
588                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
589                                (unsigned long long)eb->start);
590                 ret = -EIO;
591                 goto err;
592         }
593         found_level = btrfs_header_level(eb);
594
595         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
596                                        eb, found_level);
597
598         ret = csum_tree_block(root, eb, 1);
599         if (ret) {
600                 ret = -EIO;
601                 goto err;
602         }
603
604         /*
605          * If this is a leaf block and it is corrupt, set the corrupt bit so
606          * that we don't try to read the other copies of this block; we just
607          * return -EIO.
608          */
609         if (found_level == 0 && check_leaf(root, eb)) {
610                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
611                 ret = -EIO;
612         }
613
614         if (!ret)
615                 set_extent_buffer_uptodate(eb);
616 err:
617         if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
618                 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
619                 btree_readahead_hook(root, eb, eb->start, ret);
620         }
621
622         if (ret)
623                 clear_extent_buffer_uptodate(eb);
624         free_extent_buffer(eb);
625 out:
626         return ret;
627 }
628
629 static int btree_io_failed_hook(struct bio *failed_bio,
630                          struct page *page, u64 start, u64 end,
631                          int mirror_num, struct extent_state *state)
632 {
633         struct extent_buffer *eb;
634         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
635
636         eb = (struct extent_buffer *)page->private;
637         if (page != eb->pages[0])
638                 return -EIO;
639
640         if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
641                 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
642                 btree_readahead_hook(root, eb, eb->start, -EIO);
643         }
644         return -EIO;    /* we fixed nothing */
645 }
646
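/*
 * bio end_io callback: queue the completed bio on the matching end_io
 * workqueue so the final processing happens in task context.
 */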
647 static void end_workqueue_bio(struct bio *bio, int err)
648 {
649         struct end_io_wq *end_io_wq = bio->bi_private;
650         struct btrfs_fs_info *fs_info;
651
652         fs_info = end_io_wq->info;
653         end_io_wq->error = err;
654         end_io_wq->work.func = end_workqueue_fn;
655         end_io_wq->work.flags = 0;
656
657         if (bio->bi_rw & REQ_WRITE) {
658                 if (end_io_wq->metadata == 1)
659                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
660                                            &end_io_wq->work);
661                 else if (end_io_wq->metadata == 2)
662                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
663                                            &end_io_wq->work);
664                 else
665                         btrfs_queue_worker(&fs_info->endio_write_workers,
666                                            &end_io_wq->work);
667         } else {
668                 if (end_io_wq->metadata)
669                         btrfs_queue_worker(&fs_info->endio_meta_workers,
670                                            &end_io_wq->work);
671                 else
672                         btrfs_queue_worker(&fs_info->endio_workers,
673                                            &end_io_wq->work);
674         }
675 }
676
677 /*
678  * For the metadata arg you want
679  *
680  * 0 - if data
681  * 1 - if normal metadata
682  * 2 - if writing to the free space cache area
683  */
684 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
685                         int metadata)
686 {
687         struct end_io_wq *end_io_wq;
688         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
689         if (!end_io_wq)
690                 return -ENOMEM;
691
692         end_io_wq->private = bio->bi_private;
693         end_io_wq->end_io = bio->bi_end_io;
694         end_io_wq->info = info;
695         end_io_wq->error = 0;
696         end_io_wq->bio = bio;
697         end_io_wq->metadata = metadata;
698
699         bio->bi_private = end_io_wq;
700         bio->bi_end_io = end_workqueue_bio;
701         return 0;
702 }
703
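/*
 * upper bound on the number of async bio submissions we allow in flight,
 * scaled by the worker thread count and the number of open devices
 */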
704 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
705 {
706         unsigned long limit = min_t(unsigned long,
707                                     info->workers.max_workers,
708                                     info->fs_devices->open_devices);
709         return 256 * limit;
710 }
711
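/*
 * worker callbacks for async bio submission: run_one_async_start does the
 * checksumming, run_one_async_done submits the bio and throttles the async
 * submit counter, and run_one_async_free releases the request
 */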
712 static void run_one_async_start(struct btrfs_work *work)
713 {
714         struct async_submit_bio *async;
715
716         async = container_of(work, struct  async_submit_bio, work);
717         async->submit_bio_start(async->inode, async->rw, async->bio,
718                                async->mirror_num, async->bio_flags,
719                                async->bio_offset);
720 }
721
722 static void run_one_async_done(struct btrfs_work *work)
723 {
724         struct btrfs_fs_info *fs_info;
725         struct async_submit_bio *async;
726         int limit;
727
728         async = container_of(work, struct  async_submit_bio, work);
729         fs_info = BTRFS_I(async->inode)->root->fs_info;
730
731         limit = btrfs_async_submit_limit(fs_info);
732         limit = limit * 2 / 3;
733
734         atomic_dec(&fs_info->nr_async_submits);
735
736         if (atomic_read(&fs_info->nr_async_submits) < limit &&
737             waitqueue_active(&fs_info->async_submit_wait))
738                 wake_up(&fs_info->async_submit_wait);
739
740         async->submit_bio_done(async->inode, async->rw, async->bio,
741                                async->mirror_num, async->bio_flags,
742                                async->bio_offset);
743 }
744
745 static void run_one_async_free(struct btrfs_work *work)
746 {
747         struct async_submit_bio *async;
748
749         async = container_of(work, struct  async_submit_bio, work);
750         kfree(async);
751 }
752
753 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
754                         int rw, struct bio *bio, int mirror_num,
755                         unsigned long bio_flags,
756                         u64 bio_offset,
757                         extent_submit_bio_hook_t *submit_bio_start,
758                         extent_submit_bio_hook_t *submit_bio_done)
759 {
760         struct async_submit_bio *async;
761
762         async = kmalloc(sizeof(*async), GFP_NOFS);
763         if (!async)
764                 return -ENOMEM;
765
766         async->inode = inode;
767         async->rw = rw;
768         async->bio = bio;
769         async->mirror_num = mirror_num;
770         async->submit_bio_start = submit_bio_start;
771         async->submit_bio_done = submit_bio_done;
772
773         async->work.func = run_one_async_start;
774         async->work.ordered_func = run_one_async_done;
775         async->work.ordered_free = run_one_async_free;
776
777         async->work.flags = 0;
778         async->bio_flags = bio_flags;
779         async->bio_offset = bio_offset;
780
781         atomic_inc(&fs_info->nr_async_submits);
782
783         if (rw & REQ_SYNC)
784                 btrfs_set_work_high_prio(&async->work);
785
786         btrfs_queue_worker(&fs_info->workers, &async->work);
787
788         while (atomic_read(&fs_info->async_submit_draining) &&
789               atomic_read(&fs_info->nr_async_submits)) {
790                 wait_event(fs_info->async_submit_wait,
791                            (atomic_read(&fs_info->nr_async_submits) == 0));
792         }
793
794         return 0;
795 }
796
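/* checksum every tree block page attached to this bio before it is submitted */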
797 static int btree_csum_one_bio(struct bio *bio)
798 {
799         struct bio_vec *bvec = bio->bi_io_vec;
800         int bio_index = 0;
801         struct btrfs_root *root;
802
803         WARN_ON(bio->bi_vcnt <= 0);
804         while (bio_index < bio->bi_vcnt) {
805                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
806                 csum_dirty_buffer(root, bvec->bv_page);
807                 bio_index++;
808                 bvec++;
809         }
810         return 0;
811 }
812
813 static int __btree_submit_bio_start(struct inode *inode, int rw,
814                                     struct bio *bio, int mirror_num,
815                                     unsigned long bio_flags,
816                                     u64 bio_offset)
817 {
818         /*
819          * when we're called for a write, we're already in the async
820          * submission context.  Just do the checksumming; the bio is mapped
821          * and submitted later
821          */
822         btree_csum_one_bio(bio);
823         return 0;
824 }
825
826 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
827                                  int mirror_num, unsigned long bio_flags,
828                                  u64 bio_offset)
829 {
830         /*
831          * when we're called for a write, we're already in the async
832          * submission context.  Just jump into btrfs_map_bio
833          */
834         return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
835 }
836
837 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
838                                  int mirror_num, unsigned long bio_flags,
839                                  u64 bio_offset)
840 {
841         int ret;
842
843         ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
844                                           bio, 1);
845         BUG_ON(ret);
846
847         if (!(rw & REQ_WRITE)) {
848                 /*
849                  * called for a read, do the setup so that checksum validation
850                  * can happen in the async kernel threads
851                  */
852                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
853                                      mirror_num, 0);
854         }
855
856         /*
857          * kthread helpers are used to submit writes so that checksumming
858          * can happen in parallel across all CPUs
859          */
860         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
861                                    inode, rw, bio, mirror_num, 0,
862                                    bio_offset,
863                                    __btree_submit_bio_start,
864                                    __btree_submit_bio_done);
865 }
866
867 #ifdef CONFIG_MIGRATION
868 static int btree_migratepage(struct address_space *mapping,
869                         struct page *newpage, struct page *page,
870                         enum migrate_mode mode)
871 {
872         /*
873          * we can't safely write a btree page from here
874          * because we haven't done the locking hook
875          */
876         if (PageDirty(page))
877                 return -EAGAIN;
878         /*
879          * Buffers may be managed in a filesystem specific way.
880          * We must have no buffers or drop them.
881          */
882         if (page_has_private(page) &&
883             !try_to_release_page(page, GFP_KERNEL))
884                 return -EAGAIN;
885         return migrate_page(mapping, newpage, page, mode);
886 }
887 #endif
888
889
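/*
 * writepages for the btree inode.  For background (WB_SYNC_NONE) writeback,
 * skip the work unless enough dirty metadata has piled up.
 */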
890 static int btree_writepages(struct address_space *mapping,
891                             struct writeback_control *wbc)
892 {
893         struct extent_io_tree *tree;
894         tree = &BTRFS_I(mapping->host)->io_tree;
895         if (wbc->sync_mode == WB_SYNC_NONE) {
896                 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
897                 u64 num_dirty;
898                 unsigned long thresh = 32 * 1024 * 1024;
899
900                 if (wbc->for_kupdate)
901                         return 0;
902
903                 /* this is a bit racy, but that's ok */
904                 num_dirty = root->fs_info->dirty_metadata_bytes;
905                 if (num_dirty < thresh)
906                         return 0;
907         }
908         return btree_write_cache_pages(mapping, wbc);
909 }
910
911 static int btree_readpage(struct file *file, struct page *page)
912 {
913         struct extent_io_tree *tree;
914         tree = &BTRFS_I(page->mapping->host)->io_tree;
915         return extent_read_full_page(tree, page, btree_get_extent, 0);
916 }
917
918 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
919 {
920         if (PageWriteback(page) || PageDirty(page))
921                 return 0;
922         /*
923          * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
924          * slab allocation from alloc_extent_state down the callchain where
925          * it'd hit a BUG_ON as those flags are not allowed.
926          */
927         gfp_flags &= ~GFP_SLAB_BUG_MASK;
928
929         return try_release_extent_buffer(page, gfp_flags);
930 }
931
932 static void btree_invalidatepage(struct page *page, unsigned long offset)
933 {
934         struct extent_io_tree *tree;
935         tree = &BTRFS_I(page->mapping->host)->io_tree;
936         extent_invalidatepage(tree, page, offset);
937         btree_releasepage(page, GFP_NOFS);
938         if (PagePrivate(page)) {
939                 printk(KERN_WARNING "btrfs warning page private not zero "
940                        "on page %llu\n", (unsigned long long)page_offset(page));
941                 ClearPagePrivate(page);
942                 set_page_private(page, 0);
943                 page_cache_release(page);
944         }
945 }
946
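/*
 * btree pages are only dirtied through the extent_buffer interfaces, so by
 * the time we get here the eb must already be dirty, referenced and locked
 */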
947 static int btree_set_page_dirty(struct page *page)
948 {
949         struct extent_buffer *eb;
950
951         BUG_ON(!PagePrivate(page));
952         eb = (struct extent_buffer *)page->private;
953         BUG_ON(!eb);
954         BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
955         BUG_ON(!atomic_read(&eb->refs));
956         btrfs_assert_tree_locked(eb);
957         return __set_page_dirty_nobuffers(page);
958 }
959
960 static const struct address_space_operations btree_aops = {
961         .readpage       = btree_readpage,
962         .writepages     = btree_writepages,
963         .releasepage    = btree_releasepage,
964         .invalidatepage = btree_invalidatepage,
965 #ifdef CONFIG_MIGRATION
966         .migratepage    = btree_migratepage,
967 #endif
968         .set_page_dirty = btree_set_page_dirty,
969 };
970
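/*
 * start a non-blocking read of a tree block so it is cached for a later
 * read_tree_block call; the buffer reference is dropped right away
 */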
971 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
972                          u64 parent_transid)
973 {
974         struct extent_buffer *buf = NULL;
975         struct inode *btree_inode = root->fs_info->btree_inode;
976         int ret = 0;
977
978         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
979         if (!buf)
980                 return 0;
981         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
982                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
983         free_extent_buffer(buf);
984         return ret;
985 }
986
987 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
988                          int mirror_num, struct extent_buffer **eb)
989 {
990         struct extent_buffer *buf = NULL;
991         struct inode *btree_inode = root->fs_info->btree_inode;
992         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
993         int ret;
994
995         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
996         if (!buf)
997                 return 0;
998
999         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1000
1001         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
1002                                        btree_get_extent, mirror_num);
1003         if (ret) {
1004                 free_extent_buffer(buf);
1005                 return ret;
1006         }
1007
1008         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1009                 free_extent_buffer(buf);
1010                 return -EIO;
1011         } else if (extent_buffer_uptodate(buf)) {
1012                 *eb = buf;
1013         } else {
1014                 free_extent_buffer(buf);
1015         }
1016         return 0;
1017 }
1018
1019 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1020                                             u64 bytenr, u32 blocksize)
1021 {
1022         struct inode *btree_inode = root->fs_info->btree_inode;
1023         struct extent_buffer *eb;
1024         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1025                                 bytenr, blocksize);
1026         return eb;
1027 }
1028
1029 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1030                                                  u64 bytenr, u32 blocksize)
1031 {
1032         struct inode *btree_inode = root->fs_info->btree_inode;
1033         struct extent_buffer *eb;
1034
1035         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1036                                  bytenr, blocksize);
1037         return eb;
1038 }
1039
1040
1041 int btrfs_write_tree_block(struct extent_buffer *buf)
1042 {
1043         return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1044                                         buf->start + buf->len - 1);
1045 }
1046
1047 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1048 {
1049         return filemap_fdatawait_range(buf->pages[0]->mapping,
1050                                        buf->start, buf->start + buf->len - 1);
1051 }
1052
1053 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1054                                       u32 blocksize, u64 parent_transid)
1055 {
1056         struct extent_buffer *buf = NULL;
1057         int ret;
1058
1059         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1060         if (!buf)
1061                 return NULL;
1062
1063         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1064         return buf;
1065
1066 }
1067
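/*
 * clear the dirty bits on a buffer allocated in the currently running
 * transaction and adjust the dirty metadata accounting to match
 */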
1068 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1069                      struct extent_buffer *buf)
1070 {
1071         if (btrfs_header_generation(buf) ==
1072             root->fs_info->running_transaction->transid) {
1073                 btrfs_assert_tree_locked(buf);
1074
1075                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1076                         spin_lock(&root->fs_info->delalloc_lock);
1077                         if (root->fs_info->dirty_metadata_bytes >= buf->len)
1078                                 root->fs_info->dirty_metadata_bytes -= buf->len;
1079                         else
1080                                 WARN_ON(1);
1081                         spin_unlock(&root->fs_info->delalloc_lock);
1082                 }
1083
1084                 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1085                 btrfs_set_lock_blocking(buf);
1086                 clear_extent_buffer_dirty(buf);
1087         }
1088         return 0;
1089 }
1090
1091 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1092                         u32 stripesize, struct btrfs_root *root,
1093                         struct btrfs_fs_info *fs_info,
1094                         u64 objectid)
1095 {
1096         root->node = NULL;
1097         root->commit_root = NULL;
1098         root->sectorsize = sectorsize;
1099         root->nodesize = nodesize;
1100         root->leafsize = leafsize;
1101         root->stripesize = stripesize;
1102         root->ref_cows = 0;
1103         root->track_dirty = 0;
1104         root->in_radix = 0;
1105         root->orphan_item_inserted = 0;
1106         root->orphan_cleanup_state = 0;
1107
1108         root->objectid = objectid;
1109         root->last_trans = 0;
1110         root->highest_objectid = 0;
1111         root->name = NULL;
1112         root->inode_tree = RB_ROOT;
1113         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1114         root->block_rsv = NULL;
1115         root->orphan_block_rsv = NULL;
1116
1117         INIT_LIST_HEAD(&root->dirty_list);
1118         INIT_LIST_HEAD(&root->orphan_list);
1119         INIT_LIST_HEAD(&root->root_list);
1120         spin_lock_init(&root->orphan_lock);
1121         spin_lock_init(&root->inode_lock);
1122         spin_lock_init(&root->accounting_lock);
1123         mutex_init(&root->objectid_mutex);
1124         mutex_init(&root->log_mutex);
1125         init_waitqueue_head(&root->log_writer_wait);
1126         init_waitqueue_head(&root->log_commit_wait[0]);
1127         init_waitqueue_head(&root->log_commit_wait[1]);
1128         atomic_set(&root->log_commit[0], 0);
1129         atomic_set(&root->log_commit[1], 0);
1130         atomic_set(&root->log_writers, 0);
1131         root->log_batch = 0;
1132         root->log_transid = 0;
1133         root->last_log_commit = 0;
1134         extent_io_tree_init(&root->dirty_log_pages,
1135                              fs_info->btree_inode->i_mapping);
1136
1137         memset(&root->root_key, 0, sizeof(root->root_key));
1138         memset(&root->root_item, 0, sizeof(root->root_item));
1139         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1140         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1141         root->defrag_trans_start = fs_info->generation;
1142         init_completion(&root->kobj_unregister);
1143         root->defrag_running = 0;
1144         root->root_key.objectid = objectid;
1145         root->anon_dev = 0;
1146         return 0;
1147 }
1148
1149 static int find_and_setup_root(struct btrfs_root *tree_root,
1150                                struct btrfs_fs_info *fs_info,
1151                                u64 objectid,
1152                                struct btrfs_root *root)
1153 {
1154         int ret;
1155         u32 blocksize;
1156         u64 generation;
1157
1158         __setup_root(tree_root->nodesize, tree_root->leafsize,
1159                      tree_root->sectorsize, tree_root->stripesize,
1160                      root, fs_info, objectid);
1161         ret = btrfs_find_last_root(tree_root, objectid,
1162                                    &root->root_item, &root->root_key);
1163         if (ret > 0)
1164                 return -ENOENT;
1165         BUG_ON(ret);
1166
1167         generation = btrfs_root_generation(&root->root_item);
1168         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1169         root->commit_root = NULL;
1170         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1171                                      blocksize, generation);
1172         if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
1173                 free_extent_buffer(root->node);
1174                 root->node = NULL;
1175                 return -EIO;
1176         }
1177         root->commit_root = btrfs_root_node(root);
1178         return 0;
1179 }
1180
1181 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
1182 {
1183         struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
1184         if (root)
1185                 root->fs_info = fs_info;
1186         return root;
1187 }
1188
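/*
 * allocate an in-memory log tree root and its (initially empty) root block.
 * Log tree roots all use BTRFS_TREE_LOG_OBJECTID and are not reference
 * counted (see the comment below about ref_cows).
 */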
1189 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1190                                          struct btrfs_fs_info *fs_info)
1191 {
1192         struct btrfs_root *root;
1193         struct btrfs_root *tree_root = fs_info->tree_root;
1194         struct extent_buffer *leaf;
1195
1196         root = btrfs_alloc_root(fs_info);
1197         if (!root)
1198                 return ERR_PTR(-ENOMEM);
1199
1200         __setup_root(tree_root->nodesize, tree_root->leafsize,
1201                      tree_root->sectorsize, tree_root->stripesize,
1202                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1203
1204         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1205         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1206         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1207         /*
1208          * log trees do not get reference counted because they go away
1209          * before a real commit is actually done.  They do store pointers
1210          * to file data extents, and those reference counts still get
1211          * updated (along with back refs to the log tree).
1212          */
1213         root->ref_cows = 0;
1214
1215         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1216                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1217                                       0, 0, 0, 0);
1218         if (IS_ERR(leaf)) {
1219                 kfree(root);
1220                 return ERR_CAST(leaf);
1221         }
1222
1223         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1224         btrfs_set_header_bytenr(leaf, leaf->start);
1225         btrfs_set_header_generation(leaf, trans->transid);
1226         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1227         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1228         root->node = leaf;
1229
1230         write_extent_buffer(root->node, root->fs_info->fsid,
1231                             (unsigned long)btrfs_header_fsid(root->node),
1232                             BTRFS_FSID_SIZE);
1233         btrfs_mark_buffer_dirty(root->node);
1234         btrfs_tree_unlock(root->node);
1235         return root;
1236 }
1237
1238 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1239                              struct btrfs_fs_info *fs_info)
1240 {
1241         struct btrfs_root *log_root;
1242
1243         log_root = alloc_log_tree(trans, fs_info);
1244         if (IS_ERR(log_root))
1245                 return PTR_ERR(log_root);
1246         WARN_ON(fs_info->log_root_tree);
1247         fs_info->log_root_tree = log_root;
1248         return 0;
1249 }
1250
1251 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1252                        struct btrfs_root *root)
1253 {
1254         struct btrfs_root *log_root;
1255         struct btrfs_inode_item *inode_item;
1256
1257         log_root = alloc_log_tree(trans, root->fs_info);
1258         if (IS_ERR(log_root))
1259                 return PTR_ERR(log_root);
1260
1261         log_root->last_trans = trans->transid;
1262         log_root->root_key.offset = root->root_key.objectid;
1263
1264         inode_item = &log_root->root_item.inode;
1265         inode_item->generation = cpu_to_le64(1);
1266         inode_item->size = cpu_to_le64(3);
1267         inode_item->nlink = cpu_to_le32(1);
1268         inode_item->nbytes = cpu_to_le64(root->leafsize);
1269         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1270
1271         btrfs_set_root_node(&log_root->root_item, log_root->node);
1272
1273         WARN_ON(root->log_root);
1274         root->log_root = log_root;
1275         root->log_transid = 0;
1276         root->last_log_commit = 0;
1277         return 0;
1278 }
1279
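/*
 * read a root from the tree of tree roots by key, without consulting or
 * updating the fs_roots radix cache
 */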
1280 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1281                                                struct btrfs_key *location)
1282 {
1283         struct btrfs_root *root;
1284         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1285         struct btrfs_path *path;
1286         struct extent_buffer *l;
1287         u64 generation;
1288         u32 blocksize;
1289         int ret = 0;
1290
1291         root = btrfs_alloc_root(fs_info);
1292         if (!root)
1293                 return ERR_PTR(-ENOMEM);
1294         if (location->offset == (u64)-1) {
1295                 ret = find_and_setup_root(tree_root, fs_info,
1296                                           location->objectid, root);
1297                 if (ret) {
1298                         kfree(root);
1299                         return ERR_PTR(ret);
1300                 }
1301                 goto out;
1302         }
1303
1304         __setup_root(tree_root->nodesize, tree_root->leafsize,
1305                      tree_root->sectorsize, tree_root->stripesize,
1306                      root, fs_info, location->objectid);
1307
1308         path = btrfs_alloc_path();
1309         if (!path) {
1310                 kfree(root);
1311                 return ERR_PTR(-ENOMEM);
1312         }
1313         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1314         if (ret == 0) {
1315                 l = path->nodes[0];
1316                 read_extent_buffer(l, &root->root_item,
1317                                 btrfs_item_ptr_offset(l, path->slots[0]),
1318                                 sizeof(root->root_item));
1319                 memcpy(&root->root_key, location, sizeof(*location));
1320         }
1321         btrfs_free_path(path);
1322         if (ret) {
1323                 kfree(root);
1324                 if (ret > 0)
1325                         ret = -ENOENT;
1326                 return ERR_PTR(ret);
1327         }
1328
1329         generation = btrfs_root_generation(&root->root_item);
1330         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1331         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1332                                      blocksize, generation);
1333         root->commit_root = btrfs_root_node(root);
1334         BUG_ON(!root->node);
1335 out:
1336         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1337                 root->ref_cows = 1;
1338                 btrfs_check_and_init_root_item(&root->root_item);
1339         }
1340
1341         return root;
1342 }
1343
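/*
 * look up a root by location key: well known roots are returned directly,
 * everything else is read from disk once and cached in the fs_roots radix tree
 */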
1344 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1345                                               struct btrfs_key *location)
1346 {
1347         struct btrfs_root *root;
1348         int ret;
1349
1350         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1351                 return fs_info->tree_root;
1352         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1353                 return fs_info->extent_root;
1354         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1355                 return fs_info->chunk_root;
1356         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1357                 return fs_info->dev_root;
1358         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1359                 return fs_info->csum_root;
1360 again:
1361         spin_lock(&fs_info->fs_roots_radix_lock);
1362         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1363                                  (unsigned long)location->objectid);
1364         spin_unlock(&fs_info->fs_roots_radix_lock);
1365         if (root)
1366                 return root;
1367
1368         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1369         if (IS_ERR(root))
1370                 return root;
1371
1372         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1373         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1374                                         GFP_NOFS);
1375         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1376                 ret = -ENOMEM;
1377                 goto fail;
1378         }
1379
1380         btrfs_init_free_ino_ctl(root);
1381         mutex_init(&root->fs_commit_mutex);
1382         spin_lock_init(&root->cache_lock);
1383         init_waitqueue_head(&root->cache_wait);
1384
1385         ret = get_anon_bdev(&root->anon_dev);
1386         if (ret)
1387                 goto fail;
1388
1389         if (btrfs_root_refs(&root->root_item) == 0) {
1390                 ret = -ENOENT;
1391                 goto fail;
1392         }
1393
1394         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1395         if (ret < 0)
1396                 goto fail;
1397         if (ret == 0)
1398                 root->orphan_item_inserted = 1;
1399
1400         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1401         if (ret)
1402                 goto fail;
1403
1404         spin_lock(&fs_info->fs_roots_radix_lock);
1405         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1406                                 (unsigned long)root->root_key.objectid,
1407                                 root);
1408         if (ret == 0)
1409                 root->in_radix = 1;
1410
1411         spin_unlock(&fs_info->fs_roots_radix_lock);
1412         radix_tree_preload_end();
1413         if (ret) {
1414                 if (ret == -EEXIST) {
1415                         free_fs_root(root);
1416                         goto again;
1417                 }
1418                 goto fail;
1419         }
1420
1421         ret = btrfs_find_dead_roots(fs_info->tree_root,
1422                                     root->root_key.objectid);
1423         WARN_ON(ret);
1424         return root;
1425 fail:
1426         free_fs_root(root);
1427         return ERR_PTR(ret);
1428 }
1429
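/* report congestion if any underlying device's bdi is congested */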
1430 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1431 {
1432         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1433         int ret = 0;
1434         struct btrfs_device *device;
1435         struct backing_dev_info *bdi;
1436
1437         rcu_read_lock();
1438         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1439                 if (!device->bdev)
1440                         continue;
1441                 bdi = blk_get_backing_dev_info(device->bdev);
1442                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1443                         ret = 1;
1444                         break;
1445                 }
1446         }
1447         rcu_read_unlock();
1448         return ret;
1449 }
1450
1451 /*
1452  * If this fails, caller must call bdi_destroy() to get rid of the
1453  * bdi again.
1454  */
1455 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1456 {
1457         int err;
1458
1459         bdi->capabilities = BDI_CAP_MAP_COPY;
1460         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1461         if (err)
1462                 return err;
1463
1464         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1465         bdi->congested_fn       = btrfs_congested_fn;
1466         bdi->congested_data     = info;
1467         return 0;
1468 }
1469
1470 /*
1471  * called by the kthread helper functions to finally call the bio end_io
1472  * functions.  This is where read checksum verification actually happens
1473  */
1474 static void end_workqueue_fn(struct btrfs_work *work)
1475 {
1476         struct bio *bio;
1477         struct end_io_wq *end_io_wq;
1478         struct btrfs_fs_info *fs_info;
1479         int error;
1480
1481         end_io_wq = container_of(work, struct end_io_wq, work);
1482         bio = end_io_wq->bio;
1483         fs_info = end_io_wq->info;
1484
1485         error = end_io_wq->error;
1486         bio->bi_private = end_io_wq->private;
1487         bio->bi_end_io = end_io_wq->end_io;
1488         kfree(end_io_wq);
1489         bio_endio(bio, error);
1490 }
1491
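/*
 * Background cleaner thread.  While the filesystem is writable it runs
 * delayed iputs, cleans up old (deleted) snapshots and kicks off inode
 * defrag, then sleeps until it is woken again.
 */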
1492 static int cleaner_kthread(void *arg)
1493 {
1494         struct btrfs_root *root = arg;
1495
1496         do {
1497                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1498
1499                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1500                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1501                         btrfs_run_delayed_iputs(root);
1502                         btrfs_clean_old_snapshots(root);
1503                         mutex_unlock(&root->fs_info->cleaner_mutex);
1504                         btrfs_run_defrag_inodes(root->fs_info);
1505                 }
1506
1507                 if (!try_to_freeze()) {
1508                         set_current_state(TASK_INTERRUPTIBLE);
1509                         if (!kthread_should_stop())
1510                                 schedule();
1511                         __set_current_state(TASK_RUNNING);
1512                 }
1513         } while (!kthread_should_stop());
1514         return 0;
1515 }
1516
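/*
 * Background transaction thread.  Roughly every 30 seconds it joins the
 * running transaction and commits it, unless the transaction is still
 * young and nobody is blocked waiting on it.
 */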
1517 static int transaction_kthread(void *arg)
1518 {
1519         struct btrfs_root *root = arg;
1520         struct btrfs_trans_handle *trans;
1521         struct btrfs_transaction *cur;
1522         u64 transid;
1523         unsigned long now;
1524         unsigned long delay;
1525         int ret;
1526
1527         do {
1528                 delay = HZ * 30;
1529                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1530                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1531
1532                 spin_lock(&root->fs_info->trans_lock);
1533                 cur = root->fs_info->running_transaction;
1534                 if (!cur) {
1535                         spin_unlock(&root->fs_info->trans_lock);
1536                         goto sleep;
1537                 }
1538
1539                 now = get_seconds();
1540                 if (!cur->blocked &&
1541                     (now < cur->start_time || now - cur->start_time < 30)) {
1542                         spin_unlock(&root->fs_info->trans_lock);
1543                         delay = HZ * 5;
1544                         goto sleep;
1545                 }
1546                 transid = cur->transid;
1547                 spin_unlock(&root->fs_info->trans_lock);
1548
1549                 trans = btrfs_join_transaction(root);
1550                 BUG_ON(IS_ERR(trans));
1551                 if (transid == trans->transid) {
1552                         ret = btrfs_commit_transaction(trans, root);
1553                         BUG_ON(ret);
1554                 } else {
1555                         btrfs_end_transaction(trans, root);
1556                 }
1557 sleep:
1558                 wake_up_process(root->fs_info->cleaner_kthread);
1559                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1560
1561                 if (!try_to_freeze()) {
1562                         set_current_state(TASK_INTERRUPTIBLE);
1563                         if (!kthread_should_stop() &&
1564                             !btrfs_transaction_blocked(root->fs_info))
1565                                 schedule_timeout(delay);
1566                         __set_current_state(TASK_RUNNING);
1567                 }
1568         } while (!kthread_should_stop());
1569         return 0;
1570 }
1571
1572 /*
1573  * this will find the highest generation in the array of
1574  * root backups.  The index of the newest entry is returned,
1575  * or -1 if we can't find anything.
1576  *
1577  * We check to make sure the array is valid by comparing the
1578  * generation of the latest root in the array with the generation
1579  * in the super block.  If they don't match we pitch it.
1580  */
1581 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1582 {
1583         u64 cur;
1584         int newest_index = -1;
1585         struct btrfs_root_backup *root_backup;
1586         int i;
1587
1588         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1589                 root_backup = info->super_copy->super_roots + i;
1590                 cur = btrfs_backup_tree_root_gen(root_backup);
1591                 if (cur == newest_gen)
1592                         newest_index = i;
1593         }
1594
1595         /* check to see if we actually wrapped around */
1596         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1597                 root_backup = info->super_copy->super_roots;
1598                 cur = btrfs_backup_tree_root_gen(root_backup);
1599                 if (cur == newest_gen)
1600                         newest_index = 0;
1601         }
1602         return newest_index;
1603 }
1604
1605
1606 /*
1607  * find the oldest backup so we know where to store new entries
1608  * in the backup array.  This will set the backup_root_index
1609  * field in the fs_info struct
1610  */
1611 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1612                                      u64 newest_gen)
1613 {
1614         int newest_index = -1;
1615
1616         newest_index = find_newest_super_backup(info, newest_gen);
1617         /* if there was garbage in there, just move along */
1618         if (newest_index == -1) {
1619                 info->backup_root_index = 0;
1620         } else {
1621                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1622         }
1623 }
1624
1625 /*
1626  * copy all the root pointers into the super backup array.
1627  * this will bump the backup pointer by one when it is
1628  * done
1629  */
1630 static void backup_super_roots(struct btrfs_fs_info *info)
1631 {
1632         int next_backup;
1633         struct btrfs_root_backup *root_backup;
1634         int last_backup;
1635
1636         next_backup = info->backup_root_index;
1637         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1638                 BTRFS_NUM_BACKUP_ROOTS;
1639
1640         /*
1641          * just overwrite the last backup if we're at the same generation;
1642          * this happens only at umount
1643          */
1644         root_backup = info->super_for_commit->super_roots + last_backup;
1645         if (btrfs_backup_tree_root_gen(root_backup) ==
1646             btrfs_header_generation(info->tree_root->node))
1647                 next_backup = last_backup;
1648
1649         root_backup = info->super_for_commit->super_roots + next_backup;
1650
1651         /*
1652          * make sure all of our padding and empty slots get zero filled
1653          * regardless of which ones we use today
1654          */
1655         memset(root_backup, 0, sizeof(*root_backup));
1656
1657         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1658
1659         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1660         btrfs_set_backup_tree_root_gen(root_backup,
1661                                btrfs_header_generation(info->tree_root->node));
1662
1663         btrfs_set_backup_tree_root_level(root_backup,
1664                                btrfs_header_level(info->tree_root->node));
1665
1666         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1667         btrfs_set_backup_chunk_root_gen(root_backup,
1668                                btrfs_header_generation(info->chunk_root->node));
1669         btrfs_set_backup_chunk_root_level(root_backup,
1670                                btrfs_header_level(info->chunk_root->node));
1671
1672         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1673         btrfs_set_backup_extent_root_gen(root_backup,
1674                                btrfs_header_generation(info->extent_root->node));
1675         btrfs_set_backup_extent_root_level(root_backup,
1676                                btrfs_header_level(info->extent_root->node));
1677
1678         /*
1679          * we might commit during log recovery, which happens before we set
1680          * the fs_root.  Make sure it is valid before we fill it in.
1681          */
1682         if (info->fs_root && info->fs_root->node) {
1683                 btrfs_set_backup_fs_root(root_backup,
1684                                          info->fs_root->node->start);
1685                 btrfs_set_backup_fs_root_gen(root_backup,
1686                                btrfs_header_generation(info->fs_root->node));
1687                 btrfs_set_backup_fs_root_level(root_backup,
1688                                btrfs_header_level(info->fs_root->node));
1689         }
1690
1691         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1692         btrfs_set_backup_dev_root_gen(root_backup,
1693                                btrfs_header_generation(info->dev_root->node));
1694         btrfs_set_backup_dev_root_level(root_backup,
1695                                        btrfs_header_level(info->dev_root->node));
1696
1697         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1698         btrfs_set_backup_csum_root_gen(root_backup,
1699                                btrfs_header_generation(info->csum_root->node));
1700         btrfs_set_backup_csum_root_level(root_backup,
1701                                btrfs_header_level(info->csum_root->node));
1702
1703         btrfs_set_backup_total_bytes(root_backup,
1704                              btrfs_super_total_bytes(info->super_copy));
1705         btrfs_set_backup_bytes_used(root_backup,
1706                              btrfs_super_bytes_used(info->super_copy));
1707         btrfs_set_backup_num_devices(root_backup,
1708                              btrfs_super_num_devices(info->super_copy));
1709
1710         /*
1711          * if we don't copy this out to the super_copy, it won't get remembered
1712          * for the next commit
1713          */
1714         memcpy(&info->super_copy->super_roots,
1715                &info->super_for_commit->super_roots,
1716                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1717 }
1718
1719 /*
1720  * this copies info out of the root backup array and back into
1721  * the in-memory super block.  It is meant to help iterate through
1722  * the array, so you send it the number of backups you've already
1723  * tried and the last backup index you used.
1724  *
1725  * this returns -1 when it has tried all the backups
1726  */
1727 static noinline int next_root_backup(struct btrfs_fs_info *info,
1728                                      struct btrfs_super_block *super,
1729                                      int *num_backups_tried, int *backup_index)
1730 {
1731         struct btrfs_root_backup *root_backup;
1732         int newest = *backup_index;
1733
1734         if (*num_backups_tried == 0) {
1735                 u64 gen = btrfs_super_generation(super);
1736
1737                 newest = find_newest_super_backup(info, gen);
1738                 if (newest == -1)
1739                         return -1;
1740
1741                 *backup_index = newest;
1742                 *num_backups_tried = 1;
1743         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1744                 /* we've tried all the backups, all done */
1745                 return -1;
1746         } else {
1747                 /* jump to the next oldest backup */
1748                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1749                         BTRFS_NUM_BACKUP_ROOTS;
1750                 *backup_index = newest;
1751                 *num_backups_tried += 1;
1752         }
1753         root_backup = super->super_roots + newest;
1754
1755         btrfs_set_super_generation(super,
1756                                    btrfs_backup_tree_root_gen(root_backup));
1757         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1758         btrfs_set_super_root_level(super,
1759                                    btrfs_backup_tree_root_level(root_backup));
1760         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1761
1762         /*
1763          * fixme: the total bytes and num_devices need to match or we
1764          * need a fsck
1765          */
1766         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1767         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1768         return 0;
1769 }
1770
1771 /* helper to cleanup tree roots */
1772 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1773 {
1774         free_extent_buffer(info->tree_root->node);
1775         free_extent_buffer(info->tree_root->commit_root);
1776         free_extent_buffer(info->dev_root->node);
1777         free_extent_buffer(info->dev_root->commit_root);
1778         free_extent_buffer(info->extent_root->node);
1779         free_extent_buffer(info->extent_root->commit_root);
1780         free_extent_buffer(info->csum_root->node);
1781         free_extent_buffer(info->csum_root->commit_root);
1782
1783         info->tree_root->node = NULL;
1784         info->tree_root->commit_root = NULL;
1785         info->dev_root->node = NULL;
1786         info->dev_root->commit_root = NULL;
1787         info->extent_root->node = NULL;
1788         info->extent_root->commit_root = NULL;
1789         info->csum_root->node = NULL;
1790         info->csum_root->commit_root = NULL;
1791
1792         if (chunk_root) {
1793                 free_extent_buffer(info->chunk_root->node);
1794                 free_extent_buffer(info->chunk_root->commit_root);
1795                 info->chunk_root->node = NULL;
1796                 info->chunk_root->commit_root = NULL;
1797         }
1798 }
1799
1800
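/*
 * Main mount-time entry point.  Reads the super block, sets up the
 * fs_info state and worker threads, reads the chunk, root, extent, dev
 * and csum trees, replays the log tree if needed and starts the cleaner
 * and transaction kthreads.  If the tree root is unreadable and the
 * recovery mount option is set, the backup roots stored in the super
 * block are tried one by one (see retry_root_backup below).
 */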
1801 int open_ctree(struct super_block *sb,
1802                struct btrfs_fs_devices *fs_devices,
1803                char *options)
1804 {
1805         u32 sectorsize;
1806         u32 nodesize;
1807         u32 leafsize;
1808         u32 blocksize;
1809         u32 stripesize;
1810         u64 generation;
1811         u64 features;
1812         struct btrfs_key location;
1813         struct buffer_head *bh;
1814         struct btrfs_super_block *disk_super;
1815         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1816         struct btrfs_root *tree_root;
1817         struct btrfs_root *extent_root;
1818         struct btrfs_root *csum_root;
1819         struct btrfs_root *chunk_root;
1820         struct btrfs_root *dev_root;
1821         struct btrfs_root *log_tree_root;
1822         int ret;
1823         int err = -EINVAL;
1824         int num_backups_tried = 0;
1825         int backup_index = 0;
1826
1827         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
1828         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
1829         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
1830         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
1831         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
1832
1833         if (!tree_root || !extent_root || !csum_root ||
1834             !chunk_root || !dev_root) {
1835                 err = -ENOMEM;
1836                 goto fail;
1837         }
1838
1839         ret = init_srcu_struct(&fs_info->subvol_srcu);
1840         if (ret) {
1841                 err = ret;
1842                 goto fail;
1843         }
1844
1845         ret = setup_bdi(fs_info, &fs_info->bdi);
1846         if (ret) {
1847                 err = ret;
1848                 goto fail_srcu;
1849         }
1850
1851         fs_info->btree_inode = new_inode(sb);
1852         if (!fs_info->btree_inode) {
1853                 err = -ENOMEM;
1854                 goto fail_bdi;
1855         }
1856
1857         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1858
1859         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1860         INIT_LIST_HEAD(&fs_info->trans_list);
1861         INIT_LIST_HEAD(&fs_info->dead_roots);
1862         INIT_LIST_HEAD(&fs_info->delayed_iputs);
1863         INIT_LIST_HEAD(&fs_info->hashers);
1864         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1865         INIT_LIST_HEAD(&fs_info->ordered_operations);
1866         INIT_LIST_HEAD(&fs_info->caching_block_groups);
1867         spin_lock_init(&fs_info->delalloc_lock);
1868         spin_lock_init(&fs_info->trans_lock);
1869         spin_lock_init(&fs_info->ref_cache_lock);
1870         spin_lock_init(&fs_info->fs_roots_radix_lock);
1871         spin_lock_init(&fs_info->delayed_iput_lock);
1872         spin_lock_init(&fs_info->defrag_inodes_lock);
1873         spin_lock_init(&fs_info->free_chunk_lock);
1874         mutex_init(&fs_info->reloc_mutex);
1875
1876         init_completion(&fs_info->kobj_unregister);
1877         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1878         INIT_LIST_HEAD(&fs_info->space_info);
1879         btrfs_mapping_init(&fs_info->mapping_tree);
1880         btrfs_init_block_rsv(&fs_info->global_block_rsv);
1881         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1882         btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1883         btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1884         btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1885         btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
1886         atomic_set(&fs_info->nr_async_submits, 0);
1887         atomic_set(&fs_info->async_delalloc_pages, 0);
1888         atomic_set(&fs_info->async_submit_draining, 0);
1889         atomic_set(&fs_info->nr_async_bios, 0);
1890         atomic_set(&fs_info->defrag_running, 0);
1891         fs_info->sb = sb;
1892         fs_info->max_inline = 8192 * 1024;
1893         fs_info->metadata_ratio = 0;
1894         fs_info->defrag_inodes = RB_ROOT;
1895         fs_info->trans_no_join = 0;
1896         fs_info->free_chunk_space = 0;
1897
1898         /* readahead state */
1899         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
1900         spin_lock_init(&fs_info->reada_lock);
1901
1902         fs_info->thread_pool_size = min_t(unsigned long,
1903                                           num_online_cpus() + 2, 8);
1904
1905         INIT_LIST_HEAD(&fs_info->ordered_extents);
1906         spin_lock_init(&fs_info->ordered_extent_lock);
1907         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
1908                                         GFP_NOFS);
1909         if (!fs_info->delayed_root) {
1910                 err = -ENOMEM;
1911                 goto fail_iput;
1912         }
1913         btrfs_init_delayed_root(fs_info->delayed_root);
1914
1915         mutex_init(&fs_info->scrub_lock);
1916         atomic_set(&fs_info->scrubs_running, 0);
1917         atomic_set(&fs_info->scrub_pause_req, 0);
1918         atomic_set(&fs_info->scrubs_paused, 0);
1919         atomic_set(&fs_info->scrub_cancel_req, 0);
1920         init_waitqueue_head(&fs_info->scrub_pause_wait);
1921         init_rwsem(&fs_info->scrub_super_lock);
1922         fs_info->scrub_workers_refcnt = 0;
1923 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1924         fs_info->check_integrity_print_mask = 0;
1925 #endif
1926
1927         spin_lock_init(&fs_info->balance_lock);
1928         mutex_init(&fs_info->balance_mutex);
1929         atomic_set(&fs_info->balance_running, 0);
1930         atomic_set(&fs_info->balance_pause_req, 0);
1931         atomic_set(&fs_info->balance_cancel_req, 0);
1932         fs_info->balance_ctl = NULL;
1933         init_waitqueue_head(&fs_info->balance_wait_q);
1934
1935         sb->s_blocksize = 4096;
1936         sb->s_blocksize_bits = blksize_bits(4096);
1937         sb->s_bdi = &fs_info->bdi;
1938
1939         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1940         set_nlink(fs_info->btree_inode, 1);
1941         /*
1942          * we set the i_size on the btree inode to the largest possible offset.
1943          * the real end of the address space is determined by all of
1944          * the devices in the system
1945          */
1946         fs_info->btree_inode->i_size = OFFSET_MAX;
1947         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1948         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1949
1950         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1951         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1952                              fs_info->btree_inode->i_mapping);
1953         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
1954         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
1955
1956         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1957
1958         BTRFS_I(fs_info->btree_inode)->root = tree_root;
1959         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1960                sizeof(struct btrfs_key));
1961         BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1962         insert_inode_hash(fs_info->btree_inode);
1963
1964         spin_lock_init(&fs_info->block_group_cache_lock);
1965         fs_info->block_group_cache_tree = RB_ROOT;
1966
1967         extent_io_tree_init(&fs_info->freed_extents[0],
1968                              fs_info->btree_inode->i_mapping);
1969         extent_io_tree_init(&fs_info->freed_extents[1],
1970                              fs_info->btree_inode->i_mapping);
1971         fs_info->pinned_extents = &fs_info->freed_extents[0];
1972         fs_info->do_barriers = 1;
1973
1974
1975         mutex_init(&fs_info->ordered_operations_mutex);
1976         mutex_init(&fs_info->tree_log_mutex);
1977         mutex_init(&fs_info->chunk_mutex);
1978         mutex_init(&fs_info->transaction_kthread_mutex);
1979         mutex_init(&fs_info->cleaner_mutex);
1980         mutex_init(&fs_info->volume_mutex);
1981         init_rwsem(&fs_info->extent_commit_sem);
1982         init_rwsem(&fs_info->cleanup_work_sem);
1983         init_rwsem(&fs_info->subvol_sem);
1984
1985         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1986         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1987
1988         init_waitqueue_head(&fs_info->transaction_throttle);
1989         init_waitqueue_head(&fs_info->transaction_wait);
1990         init_waitqueue_head(&fs_info->transaction_blocked_wait);
1991         init_waitqueue_head(&fs_info->async_submit_wait);
1992
1993         __setup_root(4096, 4096, 4096, 4096, tree_root,
1994                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
1995
1996         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1997         if (!bh) {
1998                 err = -EINVAL;
1999                 goto fail_alloc;
2000         }
2001
2002         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2003         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2004                sizeof(*fs_info->super_for_commit));
2005         brelse(bh);
2006
2007         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2008
2009         disk_super = fs_info->super_copy;
2010         if (!btrfs_super_root(disk_super))
2011                 goto fail_alloc;
2012
2013         /* check FS state, whether FS is broken. */
2014         fs_info->fs_state |= btrfs_super_flags(disk_super);
2015
2016         btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2017
2018         /*
2019          * run through our array of backup supers and set up
2020          * our ring pointer to the oldest one
2021          */
2022         generation = btrfs_super_generation(disk_super);
2023         find_oldest_super_backup(fs_info, generation);
2024
2025         /*
2026          * In the long term, we'll store the compression type in the super
2027          * block, and it'll be used for per file compression control.
2028          */
2029         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2030
2031         ret = btrfs_parse_options(tree_root, options);
2032         if (ret) {
2033                 err = ret;
2034                 goto fail_alloc;
2035         }
2036
2037         features = btrfs_super_incompat_flags(disk_super) &
2038                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2039         if (features) {
2040                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2041                        "unsupported optional features (%Lx).\n",
2042                        (unsigned long long)features);
2043                 err = -EINVAL;
2044                 goto fail_alloc;
2045         }
2046
2047         if (btrfs_super_leafsize(disk_super) !=
2048             btrfs_super_nodesize(disk_super)) {
2049                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2050                        "blocksizes don't match.  node %d leaf %d\n",
2051                        btrfs_super_nodesize(disk_super),
2052                        btrfs_super_leafsize(disk_super));
2053                 err = -EINVAL;
2054                 goto fail_alloc;
2055         }
2056         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2057                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2058                        "blocksize (%d) was too large\n",
2059                        btrfs_super_leafsize(disk_super));
2060                 err = -EINVAL;
2061                 goto fail_alloc;
2062         }
2063
2064         features = btrfs_super_incompat_flags(disk_super);
2065         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2066         if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
2067                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2068
2069         /*
2070          * flag our filesystem as having big metadata blocks if
2071          * they are bigger than the page size
2072          */
2073         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2074                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2075                         printk(KERN_INFO "btrfs: flagging fs with big metadata feature\n");
2076                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2077         }
2078
2079         btrfs_set_super_incompat_flags(disk_super, features);
2080
2081         features = btrfs_super_compat_ro_flags(disk_super) &
2082                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2083         if (!(sb->s_flags & MS_RDONLY) && features) {
2084                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2085                        "unsupported option features (%Lx).\n",
2086                        (unsigned long long)features);
2087                 err = -EINVAL;
2088                 goto fail_alloc;
2089         }
2090
2091         btrfs_init_workers(&fs_info->generic_worker,
2092                            "genwork", 1, NULL);
2093
2094         btrfs_init_workers(&fs_info->workers, "worker",
2095                            fs_info->thread_pool_size,
2096                            &fs_info->generic_worker);
2097
2098         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2099                            fs_info->thread_pool_size,
2100                            &fs_info->generic_worker);
2101
2102         btrfs_init_workers(&fs_info->submit_workers, "submit",
2103                            min_t(u64, fs_devices->num_devices,
2104                            fs_info->thread_pool_size),
2105                            &fs_info->generic_worker);
2106
2107         btrfs_init_workers(&fs_info->caching_workers, "cache",
2108                            2, &fs_info->generic_worker);
2109
2110         /* a higher idle thresh on the submit workers makes it much more
2111          * likely that bios will be sent down in a sane order to the
2112          * devices
2113          */
2114         fs_info->submit_workers.idle_thresh = 64;
2115
2116         fs_info->workers.idle_thresh = 16;
2117         fs_info->workers.ordered = 1;
2118
2119         fs_info->delalloc_workers.idle_thresh = 2;
2120         fs_info->delalloc_workers.ordered = 1;
2121
2122         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2123                            &fs_info->generic_worker);
2124         btrfs_init_workers(&fs_info->endio_workers, "endio",
2125                            fs_info->thread_pool_size,
2126                            &fs_info->generic_worker);
2127         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2128                            fs_info->thread_pool_size,
2129                            &fs_info->generic_worker);
2130         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2131                            "endio-meta-write", fs_info->thread_pool_size,
2132                            &fs_info->generic_worker);
2133         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2134                            fs_info->thread_pool_size,
2135                            &fs_info->generic_worker);
2136         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2137                            1, &fs_info->generic_worker);
2138         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2139                            fs_info->thread_pool_size,
2140                            &fs_info->generic_worker);
2141         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2142                            fs_info->thread_pool_size,
2143                            &fs_info->generic_worker);
2144
2145         /*
2146          * endios are largely parallel and should have a very
2147          * low idle thresh
2148          */
2149         fs_info->endio_workers.idle_thresh = 4;
2150         fs_info->endio_meta_workers.idle_thresh = 4;
2151
2152         fs_info->endio_write_workers.idle_thresh = 2;
2153         fs_info->endio_meta_write_workers.idle_thresh = 2;
2154         fs_info->readahead_workers.idle_thresh = 2;
2155
2156         /*
2157          * btrfs_start_workers can really only fail because of ENOMEM so just
2158          * return -ENOMEM if any of these fail.
2159          */
2160         ret = btrfs_start_workers(&fs_info->workers);
2161         ret |= btrfs_start_workers(&fs_info->generic_worker);
2162         ret |= btrfs_start_workers(&fs_info->submit_workers);
2163         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2164         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2165         ret |= btrfs_start_workers(&fs_info->endio_workers);
2166         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2167         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2168         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2169         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2170         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2171         ret |= btrfs_start_workers(&fs_info->caching_workers);
2172         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2173         if (ret) {
2174                 ret = -ENOMEM;
2175                 goto fail_sb_buffer;
2176         }
2177
2178         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2179         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2180                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2181
2182         nodesize = btrfs_super_nodesize(disk_super);
2183         leafsize = btrfs_super_leafsize(disk_super);
2184         sectorsize = btrfs_super_sectorsize(disk_super);
2185         stripesize = btrfs_super_stripesize(disk_super);
2186         tree_root->nodesize = nodesize;
2187         tree_root->leafsize = leafsize;
2188         tree_root->sectorsize = sectorsize;
2189         tree_root->stripesize = stripesize;
2190
2191         sb->s_blocksize = sectorsize;
2192         sb->s_blocksize_bits = blksize_bits(sectorsize);
2193
2194         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
2195                     sizeof(disk_super->magic))) {
2196                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2197                 goto fail_sb_buffer;
2198         }
2199
2200         if (sectorsize < PAGE_SIZE) {
2201                 printk(KERN_WARNING "btrfs: Incompatible sector size "
2202                        "found on %s\n", sb->s_id);
2203                 goto fail_sb_buffer;
2204         }
2205
2206         mutex_lock(&fs_info->chunk_mutex);
2207         ret = btrfs_read_sys_array(tree_root);
2208         mutex_unlock(&fs_info->chunk_mutex);
2209         if (ret) {
2210                 printk(KERN_WARNING "btrfs: failed to read the system "
2211                        "array on %s\n", sb->s_id);
2212                 goto fail_sb_buffer;
2213         }
2214
2215         blocksize = btrfs_level_size(tree_root,
2216                                      btrfs_super_chunk_root_level(disk_super));
2217         generation = btrfs_super_chunk_root_generation(disk_super);
2218
2219         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2220                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2221
2222         chunk_root->node = read_tree_block(chunk_root,
2223                                            btrfs_super_chunk_root(disk_super),
2224                                            blocksize, generation);
2225         BUG_ON(!chunk_root->node);
2226         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2227                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2228                        sb->s_id);
2229                 goto fail_tree_roots;
2230         }
2231         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2232         chunk_root->commit_root = btrfs_root_node(chunk_root);
2233
2234         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2235            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2236            BTRFS_UUID_SIZE);
2237
2238         ret = btrfs_read_chunk_tree(chunk_root);
2239         if (ret) {
2240                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2241                        sb->s_id);
2242                 goto fail_tree_roots;
2243         }
2244
2245         btrfs_close_extra_devices(fs_devices);
2246
2247         if (!fs_devices->latest_bdev) {
2248                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2249                        sb->s_id);
2250                 goto fail_tree_roots;
2251         }
2252
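        /*
         * Read the tree root.  If that fails, recovery_tree_root at the
         * bottom of this function steps back through the super block's
         * backup roots and jumps back to this label.
         */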
2253 retry_root_backup:
2254         blocksize = btrfs_level_size(tree_root,
2255                                      btrfs_super_root_level(disk_super));
2256         generation = btrfs_super_generation(disk_super);
2257
2258         tree_root->node = read_tree_block(tree_root,
2259                                           btrfs_super_root(disk_super),
2260                                           blocksize, generation);
2261         if (!tree_root->node ||
2262             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2263                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2264                        sb->s_id);
2265
2266                 goto recovery_tree_root;
2267         }
2268
2269         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2270         tree_root->commit_root = btrfs_root_node(tree_root);
2271
2272         ret = find_and_setup_root(tree_root, fs_info,
2273                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2274         if (ret)
2275                 goto recovery_tree_root;
2276         extent_root->track_dirty = 1;
2277
2278         ret = find_and_setup_root(tree_root, fs_info,
2279                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2280         if (ret)
2281                 goto recovery_tree_root;
2282         dev_root->track_dirty = 1;
2283
2284         ret = find_and_setup_root(tree_root, fs_info,
2285                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2286         if (ret)
2287                 goto recovery_tree_root;
2288
2289         csum_root->track_dirty = 1;
2290
2291         fs_info->generation = generation;
2292         fs_info->last_trans_committed = generation;
2293
2294         ret = btrfs_init_space_info(fs_info);
2295         if (ret) {
2296                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2297                 goto fail_block_groups;
2298         }
2299
2300         ret = btrfs_read_block_groups(extent_root);
2301         if (ret) {
2302                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2303                 goto fail_block_groups;
2304         }
2305
2306         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2307                                                "btrfs-cleaner");
2308         if (IS_ERR(fs_info->cleaner_kthread))
2309                 goto fail_block_groups;
2310
2311         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2312                                                    tree_root,
2313                                                    "btrfs-transaction");
2314         if (IS_ERR(fs_info->transaction_kthread))
2315                 goto fail_cleaner;
2316
2317         if (!btrfs_test_opt(tree_root, SSD) &&
2318             !btrfs_test_opt(tree_root, NOSSD) &&
2319             !fs_info->fs_devices->rotating) {
2320                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2321                        "mode\n");
2322                 btrfs_set_opt(fs_info->mount_opt, SSD);
2323         }
2324
2325 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2326         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2327                 ret = btrfsic_mount(tree_root, fs_devices,
2328                                     btrfs_test_opt(tree_root,
2329                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2330                                     1 : 0,
2331                                     fs_info->check_integrity_print_mask);
2332                 if (ret)
2333                         printk(KERN_WARNING "btrfs: failed to initialize"
2334                                " integrity check module %s\n", sb->s_id);
2335         }
2336 #endif
2337
2338         /* do not make disk changes in broken FS */
2339         if (btrfs_super_log_root(disk_super) != 0 &&
2340             !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2341                 u64 bytenr = btrfs_super_log_root(disk_super);
2342
2343                 if (fs_devices->rw_devices == 0) {
2344                         printk(KERN_WARNING "Btrfs log replay required "
2345                                "on RO media\n");
2346                         err = -EIO;
2347                         goto fail_trans_kthread;
2348                 }
2349                 blocksize =
2350                      btrfs_level_size(tree_root,
2351                                       btrfs_super_log_root_level(disk_super));
2352
2353                 log_tree_root = btrfs_alloc_root(fs_info);
2354                 if (!log_tree_root) {
2355                         err = -ENOMEM;
2356                         goto fail_trans_kthread;
2357                 }
2358
2359                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2360                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2361
2362                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2363                                                       blocksize,
2364                                                       generation + 1);
2365                 ret = btrfs_recover_log_trees(log_tree_root);
2366                 BUG_ON(ret);
2367
2368                 if (sb->s_flags & MS_RDONLY) {
2369                         ret =  btrfs_commit_super(tree_root);
2370                         BUG_ON(ret);
2371                 }
2372         }
2373
2374         ret = btrfs_find_orphan_roots(tree_root);
2375         BUG_ON(ret);
2376
2377         if (!(sb->s_flags & MS_RDONLY)) {
2378                 ret = btrfs_cleanup_fs_roots(fs_info);
2379                 BUG_ON(ret);
2380
2381                 ret = btrfs_recover_relocation(tree_root);
2382                 if (ret < 0) {
2383                         printk(KERN_WARNING
2384                                "btrfs: failed to recover relocation\n");
2385                         err = -EINVAL;
2386                         goto fail_trans_kthread;
2387                 }
2388         }
2389
2390         location.objectid = BTRFS_FS_TREE_OBJECTID;
2391         location.type = BTRFS_ROOT_ITEM_KEY;
2392         location.offset = (u64)-1;
2393
2394         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2395         if (!fs_info->fs_root)
2396                 goto fail_trans_kthread;
2397         if (IS_ERR(fs_info->fs_root)) {
2398                 err = PTR_ERR(fs_info->fs_root);
2399                 goto fail_trans_kthread;
2400         }
2401
2402         if (!(sb->s_flags & MS_RDONLY)) {
2403                 down_read(&fs_info->cleanup_work_sem);
2404                 err = btrfs_orphan_cleanup(fs_info->fs_root);
2405                 if (!err)
2406                         err = btrfs_orphan_cleanup(fs_info->tree_root);
2407                 up_read(&fs_info->cleanup_work_sem);
2408
2409                 if (!err)
2410                         err = btrfs_recover_balance(fs_info->tree_root);
2411
2412                 if (err) {
2413                         close_ctree(tree_root);
2414                         return err;
2415                 }
2416         }
2417
2418         return 0;
2419
2420 fail_trans_kthread:
2421         kthread_stop(fs_info->transaction_kthread);
2422 fail_cleaner:
2423         kthread_stop(fs_info->cleaner_kthread);
2424
2425         /*
2426          * make sure we're done with the btree inode before we stop our
2427          * kthreads
2428          */
2429         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2430         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2431
2432 fail_block_groups:
2433         btrfs_free_block_groups(fs_info);
2434
2435 fail_tree_roots:
2436         free_root_pointers(fs_info, 1);
2437
2438 fail_sb_buffer:
2439         btrfs_stop_workers(&fs_info->generic_worker);
2440         btrfs_stop_workers(&fs_info->readahead_workers);
2441         btrfs_stop_workers(&fs_info->fixup_workers);
2442         btrfs_stop_workers(&fs_info->delalloc_workers);
2443         btrfs_stop_workers(&fs_info->workers);
2444         btrfs_stop_workers(&fs_info->endio_workers);
2445         btrfs_stop_workers(&fs_info->endio_meta_workers);
2446         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2447         btrfs_stop_workers(&fs_info->endio_write_workers);
2448         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2449         btrfs_stop_workers(&fs_info->submit_workers);
2450         btrfs_stop_workers(&fs_info->delayed_workers);
2451         btrfs_stop_workers(&fs_info->caching_workers);
2452 fail_alloc:
2453 fail_iput:
2454         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2455
2456         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2457         iput(fs_info->btree_inode);
2458 fail_bdi:
2459         bdi_destroy(&fs_info->bdi);
2460 fail_srcu:
2461         cleanup_srcu_struct(&fs_info->subvol_srcu);
2462 fail:
2463         btrfs_close_devices(fs_info->fs_devices);
2464         return err;
2465
2466 recovery_tree_root:
2467         if (!btrfs_test_opt(tree_root, RECOVERY))
2468                 goto fail_tree_roots;
2469
2470         free_root_pointers(fs_info, 0);
2471
2472         /* don't use the log in recovery mode, it won't be valid */
2473         btrfs_set_super_log_root(disk_super, 0);
2474
2475         /* we can't trust the free space cache either */
2476         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2477
2478         ret = next_root_backup(fs_info, fs_info->super_copy,
2479                                &num_backups_tried, &backup_index);
2480         if (ret == -1)
2481                 goto fail_block_groups;
2482         goto retry_root_backup;
2483 }
2484
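/*
 * end_io handler for the super block buffer heads.  On an I/O error we
 * only clear the uptodate bit and log a ratelimited warning; the super
 * block writeback code does its own error counting.
 */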
2485 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2486 {
2487         char b[BDEVNAME_SIZE];
2488
2489         if (uptodate) {
2490                 set_buffer_uptodate(bh);
2491         } else {
2492                 printk_ratelimited(KERN_WARNING "lost page write due to "
2493                                         "I/O error on %s\n",
2494                                        bdevname(bh->b_bdev, b));
2495                 /* note, we don't set_buffer_write_io_error because we have
2496                  * our own ways of dealing with the IO errors
2497                  */
2498                 clear_buffer_uptodate(bh);
2499         }
2500         unlock_buffer(bh);
2501         put_bh(bh);
2502 }
2503
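/*
 * Read the super block off one device and return the buffer_head
 * holding the copy with the highest generation, or NULL if no valid
 * super was found.  Only the primary mirror is scanned for now, see
 * the comment below.
 */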
2504 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2505 {
2506         struct buffer_head *bh;
2507         struct buffer_head *latest = NULL;
2508         struct btrfs_super_block *super;
2509         int i;
2510         u64 transid = 0;
2511         u64 bytenr;
2512
2513         /* we would like to check all the supers, but that would make
2514          * a btrfs mount succeed after a mkfs from a different FS.
2515          * So, we need to add a special mount option to scan for
2516          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2517          */
2518         for (i = 0; i < 1; i++) {
2519                 bytenr = btrfs_sb_offset(i);
2520                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2521                         break;
2522                 bh = __bread(bdev, bytenr / 4096, 4096);
2523                 if (!bh)
2524                         continue;
2525
2526                 super = (struct btrfs_super_block *)bh->b_data;
2527                 if (btrfs_super_bytenr(super) != bytenr ||
2528                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2529                             sizeof(super->magic))) {
2530                         brelse(bh);
2531                         continue;
2532                 }
2533
2534                 if (!latest || btrfs_super_generation(super) > transid) {
2535                         brelse(latest);
2536                         latest = bh;
2537                         transid = btrfs_super_generation(super);
2538                 } else {
2539                         brelse(bh);
2540                 }
2541         }
2542         return latest;
2543 }
2544
2545 /*
2546  * this should be called twice, once with wait == 0 and
2547  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2548  * we write are pinned.
2549  *
2550  * They are released when wait == 1 is done.
2551  * max_mirrors must be the same for both runs, and it indicates how
2552  * many supers on this one device should be written.
2553  *
2554  * max_mirrors == 0 means to write them all.
2555  */
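/*
 * A sketch of the typical call pattern (write_all_supers() below does
 * exactly this for every writable device):
 *
 *	write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);   submit
 *	...
 *	write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);   wait
 */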
2556 static int write_dev_supers(struct btrfs_device *device,
2557                             struct btrfs_super_block *sb,
2558                             int do_barriers, int wait, int max_mirrors)
2559 {
2560         struct buffer_head *bh;
2561         int i;
2562         int ret;
2563         int errors = 0;
2564         u32 crc;
2565         u64 bytenr;
2566
2567         if (max_mirrors == 0)
2568                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2569
2570         for (i = 0; i < max_mirrors; i++) {
2571                 bytenr = btrfs_sb_offset(i);
2572                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2573                         break;
2574
2575                 if (wait) {
2576                         bh = __find_get_block(device->bdev, bytenr / 4096,
2577                                               BTRFS_SUPER_INFO_SIZE);
2578                         BUG_ON(!bh);
2579                         wait_on_buffer(bh);
2580                         if (!buffer_uptodate(bh))
2581                                 errors++;
2582
2583                         /* drop our reference */
2584                         brelse(bh);
2585
2586                         /* drop the reference from the wait == 0 run */
2587                         brelse(bh);
2588                         continue;
2589                 } else {
2590                         btrfs_set_super_bytenr(sb, bytenr);
2591
2592                         crc = ~(u32)0;
2593                         crc = btrfs_csum_data(NULL, (char *)sb +
2594                                               BTRFS_CSUM_SIZE, crc,
2595                                               BTRFS_SUPER_INFO_SIZE -
2596                                               BTRFS_CSUM_SIZE);
2597                         btrfs_csum_final(crc, sb->csum);
2598
2599                         /*
2600                          * one reference for us, and we leave it for the
2601                          * caller
2602                          */
2603                         bh = __getblk(device->bdev, bytenr / 4096,
2604                                       BTRFS_SUPER_INFO_SIZE);
2605                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2606
2607                         /* one reference for submit_bh */
2608                         get_bh(bh);
2609
2610                         set_buffer_uptodate(bh);
2611                         lock_buffer(bh);
2612                         bh->b_end_io = btrfs_end_buffer_write_sync;
2613                 }
2614
2615                 /*
2616                  * we fua the first super.  The others are allowed
2617                  * to go down lazily.
2618                  */
2619                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
2620                 if (ret)
2621                         errors++;
2622         }
2623         return errors < i ? 0 : -1;
2624 }
2625
2626 /*
2627  * endio for write_dev_flush; this will wake anyone waiting
2628  * for the barrier when it is done
2629  */
2630 static void btrfs_end_empty_barrier(struct bio *bio, int err)
2631 {
2632         if (err) {
2633                 if (err == -EOPNOTSUPP)
2634                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2635                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2636         }
2637         if (bio->bi_private)
2638                 complete(bio->bi_private);
2639         bio_put(bio);
2640 }
2641
2642 /*
2643  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
2644  * sent down.  With wait == 1, it waits for the previous flush.
2645  *
2646  * any device where the flush fails with eopnotsupp is flagged as not-barrier
2647  * capable
2648  */
2649 static int write_dev_flush(struct btrfs_device *device, int wait)
2650 {
2651         struct bio *bio;
2652         int ret = 0;
2653
2654         if (device->nobarriers)
2655                 return 0;
2656
2657         if (wait) {
2658                 bio = device->flush_bio;
2659                 if (!bio)
2660                         return 0;
2661
2662                 wait_for_completion(&device->flush_wait);
2663
2664                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2665                         printk("btrfs: disabling barriers on dev %s\n",
2666                                device->name);
2667                         device->nobarriers = 1;
2668                 }
2669                 if (!bio_flagged(bio, BIO_UPTODATE)) {
2670                         ret = -EIO;
2671                 }
2672
2673                 /* drop the reference from the wait == 0 run */
2674                 bio_put(bio);
2675                 device->flush_bio = NULL;
2676
2677                 return ret;
2678         }
2679
2680         /*
2681          * one reference for us, and we leave it for the
2682          * caller
2683          */
2684         device->flush_bio = NULL;
2685         bio = bio_alloc(GFP_NOFS, 0);
2686         if (!bio)
2687                 return -ENOMEM;
2688
2689         bio->bi_end_io = btrfs_end_empty_barrier;
2690         bio->bi_bdev = device->bdev;
2691         init_completion(&device->flush_wait);
2692         bio->bi_private = &device->flush_wait;
2693         device->flush_bio = bio;
2694
2695         bio_get(bio);
2696         btrfsic_submit_bio(WRITE_FLUSH, bio);
2697
2698         return 0;
2699 }
2700
2701 /*
2702  * send an empty flush down to each device in parallel,
2703  * then wait for them
2704  */
2705 static int barrier_all_devices(struct btrfs_fs_info *info)
2706 {
2707         struct list_head *head;
2708         struct btrfs_device *dev;
2709         int errors = 0;
2710         int ret;
2711
2712         /* send down all the barriers */
2713         head = &info->fs_devices->devices;
2714         list_for_each_entry_rcu(dev, head, dev_list) {
2715                 if (!dev->bdev) {
2716                         errors++;
2717                         continue;
2718                 }
2719                 if (!dev->in_fs_metadata || !dev->writeable)
2720                         continue;
2721
2722                 ret = write_dev_flush(dev, 0);
2723                 if (ret)
2724                         errors++;
2725         }
2726
2727         /* wait for all the barriers */
2728         list_for_each_entry_rcu(dev, head, dev_list) {
2729                 if (!dev->bdev) {
2730                         errors++;
2731                         continue;
2732                 }
2733                 if (!dev->in_fs_metadata || !dev->writeable)
2734                         continue;
2735
2736                 ret = write_dev_flush(dev, 1);
2737                 if (ret)
2738                         errors++;
2739         }
2740         if (errors)
2741                 return -EIO;
2742         return 0;
2743 }
2744
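/*
 * Write the super block to every writable device.  This makes two
 * passes with write_dev_supers(): the first submits the buffers (after
 * an optional flush/barrier on each device), the second waits for them
 * and counts the errors.  More than max_errors failures is fatal.
 */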
2745 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2746 {
2747         struct list_head *head;
2748         struct btrfs_device *dev;
2749         struct btrfs_super_block *sb;
2750         struct btrfs_dev_item *dev_item;
2751         int ret;
2752         int do_barriers;
2753         int max_errors;
2754         int total_errors = 0;
2755         u64 flags;
2756
2757         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
2758         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2759         backup_super_roots(root->fs_info);
2760
2761         sb = root->fs_info->super_for_commit;
2762         dev_item = &sb->dev_item;
2763
2764         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2765         head = &root->fs_info->fs_devices->devices;
2766
2767         if (do_barriers)
2768                 barrier_all_devices(root->fs_info);
2769
2770         list_for_each_entry_rcu(dev, head, dev_list) {
2771                 if (!dev->bdev) {
2772                         total_errors++;
2773                         continue;
2774                 }
2775                 if (!dev->in_fs_metadata || !dev->writeable)
2776                         continue;
2777
2778                 btrfs_set_stack_device_generation(dev_item, 0);
2779                 btrfs_set_stack_device_type(dev_item, dev->type);
2780                 btrfs_set_stack_device_id(dev_item, dev->devid);
2781                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2782                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2783                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2784                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2785                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2786                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2787                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2788
2789                 flags = btrfs_super_flags(sb);
2790                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2791
2792                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2793                 if (ret)
2794                         total_errors++;
2795         }
2796         if (total_errors > max_errors) {
2797                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2798                        total_errors);
2799                 BUG();
2800         }
2801
2802         total_errors = 0;
2803         list_for_each_entry_rcu(dev, head, dev_list) {
2804                 if (!dev->bdev)
2805                         continue;
2806                 if (!dev->in_fs_metadata || !dev->writeable)
2807                         continue;
2808
2809                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2810                 if (ret)
2811                         total_errors++;
2812         }
2813         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2814         if (total_errors > max_errors) {
2815                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2816                        total_errors);
2817                 BUG();
2818         }
2819         return 0;
2820 }
2821
2822 int write_ctree_super(struct btrfs_trans_handle *trans,
2823                       struct btrfs_root *root, int max_mirrors)
2824 {
2825         int ret;
2826
2827         ret = write_all_supers(root, max_mirrors);
2828         return ret;
2829 }
2830
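/*
 * Remove a root from the fs_roots radix tree and free all of its
 * in-memory state.
 */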
2831 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2832 {
2833         spin_lock(&fs_info->fs_roots_radix_lock);
2834         radix_tree_delete(&fs_info->fs_roots_radix,
2835                           (unsigned long)root->root_key.objectid);
2836         spin_unlock(&fs_info->fs_roots_radix_lock);
2837
2838         if (btrfs_root_refs(&root->root_item) == 0)
2839                 synchronize_srcu(&fs_info->subvol_srcu);
2840
2841         __btrfs_remove_free_space_cache(root->free_ino_pinned);
2842         __btrfs_remove_free_space_cache(root->free_ino_ctl);
2843         free_fs_root(root);
2844         return 0;
2845 }
2846
2847 static void free_fs_root(struct btrfs_root *root)
2848 {
2849         iput(root->cache_inode);
2850         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2851         if (root->anon_dev)
2852                 free_anon_bdev(root->anon_dev);
2853         free_extent_buffer(root->node);
2854         free_extent_buffer(root->commit_root);
2855         kfree(root->free_ino_ctl);
2856         kfree(root->free_ino_pinned);
2857         kfree(root->name);
2858         kfree(root);
2859 }
2860
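/* free every dead root and anything left in the fs_roots radix tree */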
2861 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2862 {
2863         int ret;
2864         struct btrfs_root *gang[8];
2865         int i;
2866
2867         while (!list_empty(&fs_info->dead_roots)) {
2868                 gang[0] = list_entry(fs_info->dead_roots.next,
2869                                      struct btrfs_root, root_list);
2870                 list_del(&gang[0]->root_list);
2871
2872                 if (gang[0]->in_radix) {
2873                         btrfs_free_fs_root(fs_info, gang[0]);
2874                 } else {
2875                         free_extent_buffer(gang[0]->node);
2876                         free_extent_buffer(gang[0]->commit_root);
2877                         kfree(gang[0]);
2878                 }
2879         }
2880
2881         while (1) {
2882                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2883                                              (void **)gang, 0,
2884                                              ARRAY_SIZE(gang));
2885                 if (!ret)
2886                         break;
2887                 for (i = 0; i < ret; i++)
2888                         btrfs_free_fs_root(fs_info, gang[i]);
2889         }
2890         return 0;
2891 }
2892
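/* run orphan cleanup on every root currently in the fs_roots radix tree */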
2893 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2894 {
2895         u64 root_objectid = 0;
2896         struct btrfs_root *gang[8];
2897         int i;
2898         int ret;
2899
2900         while (1) {
2901                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2902                                              (void **)gang, root_objectid,
2903                                              ARRAY_SIZE(gang));
2904                 if (!ret)
2905                         break;
2906
2907                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
2908                 for (i = 0; i < ret; i++) {
2909                         int err;
2910
2911                         root_objectid = gang[i]->root_key.objectid;
2912                         err = btrfs_orphan_cleanup(gang[i]);
2913                         if (err)
2914                                 return err;
2915                 }
2916                 root_objectid++;
2917         }
2918         return 0;
2919 }
2920
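/*
 * Commit the currently running transaction (twice, so the original
 * snapshot is dropped as well) and then write out the super blocks.
 */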
2921 int btrfs_commit_super(struct btrfs_root *root)
2922 {
2923         struct btrfs_trans_handle *trans;
2924         int ret;
2925
2926         mutex_lock(&root->fs_info->cleaner_mutex);
2927         btrfs_run_delayed_iputs(root);
2928         btrfs_clean_old_snapshots(root);
2929         mutex_unlock(&root->fs_info->cleaner_mutex);
2930
2931         /* wait until any ongoing cleanup work is done */
2932         down_write(&root->fs_info->cleanup_work_sem);
2933         up_write(&root->fs_info->cleanup_work_sem);
2934
2935         trans = btrfs_join_transaction(root);
2936         if (IS_ERR(trans))
2937                 return PTR_ERR(trans);
2938         ret = btrfs_commit_transaction(trans, root);
2939         BUG_ON(ret);
2940         /* run commit again to drop the original snapshot */
2941         trans = btrfs_join_transaction(root);
2942         if (IS_ERR(trans))
2943                 return PTR_ERR(trans);
2944         btrfs_commit_transaction(trans, root);
2945         ret = btrfs_write_and_wait_transaction(NULL, root);
2946         BUG_ON(ret);
2947
2948         ret = write_ctree_super(NULL, root, 0);
2949         return ret;
2950 }
2951
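/*
 * Tear down the filesystem at unmount time: commit (or error-commit)
 * the super, stop the helper kthreads and worker pools, and release
 * the in-memory state that was built up at mount time.
 */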
2952 int close_ctree(struct btrfs_root *root)
2953 {
2954         struct btrfs_fs_info *fs_info = root->fs_info;
2955         int ret;
2956
2957         fs_info->closing = 1;
2958         smp_mb();
2959
2960         /* pause restriper - we want to resume on mount */
2961         btrfs_pause_balance(root->fs_info);
2962
2963         btrfs_scrub_cancel(root);
2964
2965         /* wait for any defraggers to finish */
2966         wait_event(fs_info->transaction_wait,
2967                    (atomic_read(&fs_info->defrag_running) == 0));
2968
2969         /* clear out the rbtree of defraggable inodes */
2970         btrfs_run_defrag_inodes(fs_info);
2971
2972         /*
2973          * There are two situations in which btrfs can be broken and
2974          * flipped read-only:
2975          *
2976          * 1. btrfs flipped read-only somewhere else before
2977          *    btrfs_commit_super; sb->s_flags already has MS_RDONLY set, so
2978          *    we skip writing the sb here to keep the ERROR state on disk.
2979          *
2980          * 2. btrfs flipped read-only inside btrfs_commit_super itself; the
2981          *    sb cannot be written there, but since fs_state has the
2982          *    BTRFS_SUPER_FLAG_ERROR flag set, all FS resources are cleaned
2983          *    up first and the sb is written afterwards.
2984          */
2985         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2986                 ret = btrfs_commit_super(root);
2987                 if (ret)
2988                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2989         }
2990
2991         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
2992                 ret = btrfs_error_commit_super(root);
2993                 if (ret)
2994                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2995         }
2996
2997         btrfs_put_block_group_cache(fs_info);
2998
2999         kthread_stop(fs_info->transaction_kthread);
3000         kthread_stop(fs_info->cleaner_kthread);
3001
3002         fs_info->closing = 2;
3003         smp_mb();
3004
3005         if (fs_info->delalloc_bytes) {
3006                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
3007                        (unsigned long long)fs_info->delalloc_bytes);
3008         }
3009         if (fs_info->total_ref_cache_size) {
3010                 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
3011                        (unsigned long long)fs_info->total_ref_cache_size);
3012         }
3013
3014         free_extent_buffer(fs_info->extent_root->node);
3015         free_extent_buffer(fs_info->extent_root->commit_root);
3016         free_extent_buffer(fs_info->tree_root->node);
3017         free_extent_buffer(fs_info->tree_root->commit_root);
3018         free_extent_buffer(fs_info->chunk_root->node);
3019         free_extent_buffer(fs_info->chunk_root->commit_root);
3020         free_extent_buffer(fs_info->dev_root->node);
3021         free_extent_buffer(fs_info->dev_root->commit_root);
3022         free_extent_buffer(fs_info->csum_root->node);
3023         free_extent_buffer(fs_info->csum_root->commit_root);
3024
3025         btrfs_free_block_groups(fs_info);
3026
3027         del_fs_roots(fs_info);
3028
3029         iput(fs_info->btree_inode);
3030
3031         btrfs_stop_workers(&fs_info->generic_worker);
3032         btrfs_stop_workers(&fs_info->fixup_workers);
3033         btrfs_stop_workers(&fs_info->delalloc_workers);
3034         btrfs_stop_workers(&fs_info->workers);
3035         btrfs_stop_workers(&fs_info->endio_workers);
3036         btrfs_stop_workers(&fs_info->endio_meta_workers);
3037         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
3038         btrfs_stop_workers(&fs_info->endio_write_workers);
3039         btrfs_stop_workers(&fs_info->endio_freespace_worker);
3040         btrfs_stop_workers(&fs_info->submit_workers);
3041         btrfs_stop_workers(&fs_info->delayed_workers);
3042         btrfs_stop_workers(&fs_info->caching_workers);
3043         btrfs_stop_workers(&fs_info->readahead_workers);
3044
3045 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3046         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3047                 btrfsic_unmount(root, fs_info->fs_devices);
3048 #endif
3049
3050         btrfs_close_devices(fs_info->fs_devices);
3051         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3052
3053         bdi_destroy(&fs_info->bdi);
3054         cleanup_srcu_struct(&fs_info->subvol_srcu);
3055
3056         return 0;
3057 }
3058
3059 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
3060 {
3061         int ret;
3062         struct inode *btree_inode = buf->pages[0]->mapping->host;
3063
3064         ret = extent_buffer_uptodate(buf);
3065         if (!ret)
3066                 return ret;
3067
3068         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3069                                     parent_transid);
3070         return !ret;
3071 }
3072
3073 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3074 {
3075         return set_extent_buffer_uptodate(buf);
3076 }
3077
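/*
 * Mark a tree block dirty and add its length to the dirty metadata
 * accounting.  The buffer must be tree locked and is expected to belong
 * to the currently running transaction.
 */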
3078 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3079 {
3080         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3081         u64 transid = btrfs_header_generation(buf);
3082         int was_dirty;
3083
3084         btrfs_assert_tree_locked(buf);
3085         if (transid != root->fs_info->generation) {
3086                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
3087                        "found %llu running %llu\n",
3088                         (unsigned long long)buf->start,
3089                         (unsigned long long)transid,
3090                         (unsigned long long)root->fs_info->generation);
3091                 WARN_ON(1);
3092         }
3093         was_dirty = set_extent_buffer_dirty(buf);
3094         if (!was_dirty) {
3095                 spin_lock(&root->fs_info->delalloc_lock);
3096                 root->fs_info->dirty_metadata_bytes += buf->len;
3097                 spin_unlock(&root->fs_info->delalloc_lock);
3098         }
3099 }
3100
3101 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3102 {
3103         /*
3104          * looks as though older kernels can get into trouble with
3105          * this code; they end up stuck in balance_dirty_pages forever
3106          */
3107         u64 num_dirty;
3108         unsigned long thresh = 32 * 1024 * 1024;
3109
3110         if (current->flags & PF_MEMALLOC)
3111                 return;
3112
3113         btrfs_balance_delayed_items(root);
3114
3115         num_dirty = root->fs_info->dirty_metadata_bytes;
3116
3117         if (num_dirty > thresh) {
3118                 balance_dirty_pages_ratelimited_nr(
3119                                    root->fs_info->btree_inode->i_mapping, 1);
3120         }
3121         return;
3122 }
3123
3124 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3125 {
3126         /*
3127          * looks as though older kernels can get into trouble with
3128          * this code; they end up stuck in balance_dirty_pages forever
3129          */
3130         u64 num_dirty;
3131         unsigned long thresh = 32 * 1024 * 1024;
3132
3133         if (current->flags & PF_MEMALLOC)
3134                 return;
3135
3136         num_dirty = root->fs_info->dirty_metadata_bytes;
3137
3138         if (num_dirty > thresh) {
3139                 balance_dirty_pages_ratelimited_nr(
3140                                    root->fs_info->btree_inode->i_mapping, 1);
3141         }
3142         return;
3143 }
3144
3145 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3146 {
3147         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3148         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3149 }
3150
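/*
 * write_cache_pages lock hook for the btree inode: lock the extent
 * buffer that owns this page (calling flush_fn first if the lock or the
 * page is contended), mark its header written, drop the eb's dirty
 * accounting and finally lock the page itself.
 */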
3151 static int btree_lock_page_hook(struct page *page, void *data,
3152                                 void (*flush_fn)(void *))
3153 {
3154         struct inode *inode = page->mapping->host;
3155         struct btrfs_root *root = BTRFS_I(inode)->root;
3156         struct extent_buffer *eb;
3157
3158         /*
3159          * We culled this eb but the page is still hanging out on the mapping,
3160          * carry on.
3161          */
3162         if (!PagePrivate(page))
3163                 goto out;
3164
3165         eb = (struct extent_buffer *)page->private;
3166         if (!eb) {
3167                 WARN_ON(1);
3168                 goto out;
3169         }
3170         if (page != eb->pages[0])
3171                 goto out;
3172
3173         if (!btrfs_try_tree_write_lock(eb)) {
3174                 flush_fn(data);
3175                 btrfs_tree_lock(eb);
3176         }
3177         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3178
3179         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3180                 spin_lock(&root->fs_info->delalloc_lock);
3181                 if (root->fs_info->dirty_metadata_bytes >= eb->len)
3182                         root->fs_info->dirty_metadata_bytes -= eb->len;
3183                 else
3184                         WARN_ON(1);
3185                 spin_unlock(&root->fs_info->delalloc_lock);
3186         }
3187
3188         btrfs_tree_unlock(eb);
3189 out:
3190         if (!trylock_page(page)) {
3191                 flush_fn(data);
3192                 lock_page(page);
3193         }
3194         return 0;
3195 }
3196
3197 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3198                               int read_only)
3199 {
3200         if (read_only)
3201                 return;
3202
3203         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
3204                 printk(KERN_WARNING "warning: mounting fs with errors, "
3205                        "running btrfsck is recommended\n");
3206 }
3207
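/*
 * Error-path variant of btrfs_commit_super: the running transaction is
 * torn down via btrfs_cleanup_transaction instead of being committed,
 * and the super blocks are written afterwards.
 */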
3208 int btrfs_error_commit_super(struct btrfs_root *root)
3209 {
3210         int ret;
3211
3212         mutex_lock(&root->fs_info->cleaner_mutex);
3213         btrfs_run_delayed_iputs(root);
3214         mutex_unlock(&root->fs_info->cleaner_mutex);
3215
3216         down_write(&root->fs_info->cleanup_work_sem);
3217         up_write(&root->fs_info->cleanup_work_sem);
3218
3219         /* cleanup FS via transaction */
3220         btrfs_cleanup_transaction(root);
3221
3222         ret = write_ctree_super(NULL, root, 0);
3223
3224         return ret;
3225 }
3226
3227 static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
3228 {
3229         struct btrfs_inode *btrfs_inode;
3230         struct list_head splice;
3231
3232         INIT_LIST_HEAD(&splice);
3233
3234         mutex_lock(&root->fs_info->ordered_operations_mutex);
3235         spin_lock(&root->fs_info->ordered_extent_lock);
3236
3237         list_splice_init(&root->fs_info->ordered_operations, &splice);
3238         while (!list_empty(&splice)) {
3239                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3240                                          ordered_operations);
3241
3242                 list_del_init(&btrfs_inode->ordered_operations);
3243
3244                 btrfs_invalidate_inodes(btrfs_inode->root);
3245         }
3246
3247         spin_unlock(&root->fs_info->ordered_extent_lock);
3248         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3249
3250         return 0;
3251 }
3252
3253 static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
3254 {
3255         struct list_head splice;
3256         struct btrfs_ordered_extent *ordered;
3257         struct inode *inode;
3258
3259         INIT_LIST_HEAD(&splice);
3260
3261         spin_lock(&root->fs_info->ordered_extent_lock);
3262
3263         list_splice_init(&root->fs_info->ordered_extents, &splice);
3264         while (!list_empty(&splice)) {
3265                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
3266                                      root_extent_list);
3267
3268                 list_del_init(&ordered->root_extent_list);
3269                 atomic_inc(&ordered->refs);
3270
3271                 /* the inode may be getting freed (in sys_unlink path). */
3272                 inode = igrab(ordered->inode);
3273
3274                 spin_unlock(&root->fs_info->ordered_extent_lock);
3275                 if (inode)
3276                         iput(inode);
3277
3278                 atomic_set(&ordered->refs, 1);
3279                 btrfs_put_ordered_extent(ordered);
3280
3281                 spin_lock(&root->fs_info->ordered_extent_lock);
3282         }
3283
3284         spin_unlock(&root->fs_info->ordered_extent_lock);
3285
3286         return 0;
3287 }
3288
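/* drop every delayed ref still queued on this transaction without running it */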
3289 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3290                                       struct btrfs_root *root)
3291 {
3292         struct rb_node *node;
3293         struct btrfs_delayed_ref_root *delayed_refs;
3294         struct btrfs_delayed_ref_node *ref;
3295         int ret = 0;
3296
3297         delayed_refs = &trans->delayed_refs;
3298
3299         spin_lock(&delayed_refs->lock);
3300         if (delayed_refs->num_entries == 0) {
3301                 spin_unlock(&delayed_refs->lock);
3302                 printk(KERN_INFO "delayed_refs has NO entry\n");
3303                 return ret;
3304         }
3305
3306         node = rb_first(&delayed_refs->root);
3307         while (node) {
3308                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3309                 node = rb_next(node);
3310
3311                 ref->in_tree = 0;
3312                 rb_erase(&ref->rb_node, &delayed_refs->root);
3313                 delayed_refs->num_entries--;
3314
3315                 atomic_set(&ref->refs, 1);
3316                 if (btrfs_delayed_ref_is_head(ref)) {
3317                         struct btrfs_delayed_ref_head *head;
3318
3319                         head = btrfs_delayed_node_to_head(ref);
3320                         mutex_lock(&head->mutex);
3321                         kfree(head->extent_op);
3322                         delayed_refs->num_heads--;
3323                         if (list_empty(&head->cluster))
3324                                 delayed_refs->num_heads_ready--;
3325                         list_del_init(&head->cluster);
3326                         mutex_unlock(&head->mutex);
3327                 }
3328
3329                 spin_unlock(&delayed_refs->lock);
3330                 btrfs_put_delayed_ref(ref);
3331
3332                 cond_resched();
3333                 spin_lock(&delayed_refs->lock);
3334         }
3335
3336         spin_unlock(&delayed_refs->lock);
3337
3338         return ret;
3339 }
3340
3341 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3342 {
3343         struct btrfs_pending_snapshot *snapshot;
3344         struct list_head splice;
3345
3346         INIT_LIST_HEAD(&splice);
3347
3348         list_splice_init(&t->pending_snapshots, &splice);
3349
3350         while (!list_empty(&splice)) {
3351                 snapshot = list_entry(splice.next,
3352                                       struct btrfs_pending_snapshot,
3353                                       list);
3354
3355                 list_del_init(&snapshot->list);
3356
3357                 kfree(snapshot);
3358         }
3359
3360         return 0;
3361 }
3362
3363 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3364 {
3365         struct btrfs_inode *btrfs_inode;
3366         struct list_head splice;
3367
3368         INIT_LIST_HEAD(&splice);
3369
3370         spin_lock(&root->fs_info->delalloc_lock);
3371         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
3372
3373         while (!list_empty(&splice)) {
3374                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3375                                     delalloc_inodes);
3376
3377                 list_del_init(&btrfs_inode->delalloc_inodes);
3378
3379                 btrfs_invalidate_inodes(btrfs_inode->root);
3380         }
3381
3382         spin_unlock(&root->fs_info->delalloc_lock);
3383
3384         return 0;
3385 }
3386
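/*
 * Throw away the btree pages recorded in @dirty_pages: clear the extent
 * bits, the dirty state of each page and of the extent buffer backing
 * it, and invalidate the page.
 */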
3387 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3388                                         struct extent_io_tree *dirty_pages,
3389                                         int mark)
3390 {
3391         int ret;
3392         struct page *page;
3393         struct inode *btree_inode = root->fs_info->btree_inode;
3394         struct extent_buffer *eb;
3395         u64 start = 0;
3396         u64 end;
3397         u64 offset;
3398         unsigned long index;
3399
3400         while (1) {
3401                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3402                                             mark);
3403                 if (ret)
3404                         break;
3405
3406                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3407                 while (start <= end) {
3408                         index = start >> PAGE_CACHE_SHIFT;
3409                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
3410                         page = find_get_page(btree_inode->i_mapping, index);
3411                         if (!page)
3412                                 continue;
3413                         offset = page_offset(page);
3414
3415                         spin_lock(&dirty_pages->buffer_lock);
3416                         eb = radix_tree_lookup(
3417                              &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
3418                                                offset >> PAGE_CACHE_SHIFT);
3419                         spin_unlock(&dirty_pages->buffer_lock);
3420                         if (eb) {
3421                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3422                                                          &eb->bflags);
3423                                 atomic_set(&eb->refs, 1);
3424                         }
3425                         if (PageWriteback(page))
3426                                 end_page_writeback(page);
3427
3428                         lock_page(page);
3429                         if (PageDirty(page)) {
3430                                 clear_page_dirty_for_io(page);
3431                                 spin_lock_irq(&page->mapping->tree_lock);
3432                                 radix_tree_tag_clear(&page->mapping->page_tree,
3433                                                         page_index(page),
3434                                                         PAGECACHE_TAG_DIRTY);
3435                                 spin_unlock_irq(&page->mapping->tree_lock);
3436                         }
3437
3438                         page->mapping->a_ops->invalidatepage(page, 0);
3439                         unlock_page(page);
3440                 }
3441         }
3442
3443         return ret;
3444 }
3445
3446 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3447                                        struct extent_io_tree *pinned_extents)
3448 {
3449         struct extent_io_tree *unpin;
3450         u64 start;
3451         u64 end;
3452         int ret;
3453
3454         unpin = pinned_extents;
3455         while (1) {
3456                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3457                                             EXTENT_DIRTY);
3458                 if (ret)
3459                         break;
3460
3461                 /* if mounted with discard, discard the range before unpinning it */
3462                 if (btrfs_test_opt(root, DISCARD))
3463                         ret = btrfs_error_discard_extent(root, start,
3464                                                          end + 1 - start,
3465                                                          NULL);
3466
3467                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3468                 btrfs_error_unpin_extent_range(root, start, end);
3469                 cond_resched();
3470         }
3471
3472         return 0;
3473 }
3474
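/*
 * Last-resort cleanup after a fatal error: walk every transaction still
 * on the trans_list and release its ordered extents, delayed refs,
 * pending snapshots, delalloc inodes and dirty/pinned extents without
 * committing anything.
 */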
3475 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3476 {
3477         struct btrfs_transaction *t;
3478         LIST_HEAD(list);
3479
3480         WARN_ON(1);
3481
3482         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3483
3484         spin_lock(&root->fs_info->trans_lock);
3485         list_splice_init(&root->fs_info->trans_list, &list);
3486         root->fs_info->trans_no_join = 1;
3487         spin_unlock(&root->fs_info->trans_lock);
3488
3489         while (!list_empty(&list)) {
3490                 t = list_entry(list.next, struct btrfs_transaction, list);
3491                 if (!t)
3492                         break;
3493
3494                 btrfs_destroy_ordered_operations(root);
3495
3496                 btrfs_destroy_ordered_extents(root);
3497
3498                 btrfs_destroy_delayed_refs(t, root);
3499
3500                 btrfs_block_rsv_release(root,
3501                                         &root->fs_info->trans_block_rsv,
3502                                         t->dirty_pages.dirty_bytes);
3503
3504                 /* FIXME: cleanup wait for commit */
3505                 t->in_commit = 1;
3506                 t->blocked = 1;
3507                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3508                         wake_up(&root->fs_info->transaction_blocked_wait);
3509
3510                 t->blocked = 0;
3511                 if (waitqueue_active(&root->fs_info->transaction_wait))
3512                         wake_up(&root->fs_info->transaction_wait);
3513
3514                 t->commit_done = 1;
3515                 if (waitqueue_active(&t->commit_wait))
3516                         wake_up(&t->commit_wait);
3517
3518                 btrfs_destroy_pending_snapshots(t);
3519
3520                 btrfs_destroy_delalloc_inodes(root);
3521
3522                 spin_lock(&root->fs_info->trans_lock);
3523                 root->fs_info->running_transaction = NULL;
3524                 spin_unlock(&root->fs_info->trans_lock);
3525
3526                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3527                                              EXTENT_DIRTY);
3528
3529                 btrfs_destroy_pinned_extent(root,
3530                                             root->fs_info->pinned_extents);
3531
3532                 atomic_set(&t->use_count, 0);
3533                 list_del_init(&t->list);
3534                 memset(t, 0, sizeof(*t));
3535                 kmem_cache_free(btrfs_transaction_cachep, t);
3536         }
3537
3538         spin_lock(&root->fs_info->trans_lock);
3539         root->fs_info->trans_no_join = 0;
3540         spin_unlock(&root->fs_info->trans_lock);
3541         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3542
3543         return 0;
3544 }
3545
3546 static struct extent_io_ops btree_extent_io_ops = {
3547         .write_cache_pages_lock_hook = btree_lock_page_hook,
3548         .readpage_end_io_hook = btree_readpage_end_io_hook,
3549         .readpage_io_failed_hook = btree_io_failed_hook,
3550         .submit_bio_hook = btree_submit_bio_hook,
3551         /* note we're sharing with inode.c for the merge bio hook */
3552         .merge_bio_hook = btrfs_merge_bio_hook,
3553 };