Btrfs: remove dead code
linux-2.6-block.git: fs/btrfs/extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include "compat.h"
26 #include "hash.h"
27 #include "ctree.h"
28 #include "disk-io.h"
29 #include "print-tree.h"
30 #include "transaction.h"
31 #include "volumes.h"
32 #include "locking.h"
33 #include "free-space-cache.h"
34
35 static int update_block_group(struct btrfs_trans_handle *trans,
36                               struct btrfs_root *root,
37                               u64 bytenr, u64 num_bytes, int alloc,
38                               int mark_free);
39 static int update_reserved_extents(struct btrfs_block_group_cache *cache,
40                                    u64 num_bytes, int reserve);
41 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
42                                 struct btrfs_root *root,
43                                 u64 bytenr, u64 num_bytes, u64 parent,
44                                 u64 root_objectid, u64 owner_objectid,
45                                 u64 owner_offset, int refs_to_drop,
46                                 struct btrfs_delayed_extent_op *extra_op);
47 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
48                                     struct extent_buffer *leaf,
49                                     struct btrfs_extent_item *ei);
50 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
51                                       struct btrfs_root *root,
52                                       u64 parent, u64 root_objectid,
53                                       u64 flags, u64 owner, u64 offset,
54                                       struct btrfs_key *ins, int ref_mod);
55 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
56                                      struct btrfs_root *root,
57                                      u64 parent, u64 root_objectid,
58                                      u64 flags, struct btrfs_disk_key *key,
59                                      int level, struct btrfs_key *ins);
60 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
61                           struct btrfs_root *extent_root, u64 alloc_bytes,
62                           u64 flags, int force);
63 static int pin_down_bytes(struct btrfs_trans_handle *trans,
64                           struct btrfs_root *root,
65                           struct btrfs_path *path,
66                           u64 bytenr, u64 num_bytes,
67                           int is_data, int reserved,
68                           struct extent_buffer **must_clean);
69 static int find_next_key(struct btrfs_path *path, int level,
70                          struct btrfs_key *key);
71
72 static noinline int
73 block_group_cache_done(struct btrfs_block_group_cache *cache)
74 {
75         smp_mb();
76         return cache->cached == BTRFS_CACHE_FINISHED;
77 }
78
79 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
80 {
81         return (cache->flags & bits) == bits;
82 }
83
84 /*
85  * this adds the block group to the fs_info rb tree for the block group
86  * cache
87  */
88 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
89                                 struct btrfs_block_group_cache *block_group)
90 {
91         struct rb_node **p;
92         struct rb_node *parent = NULL;
93         struct btrfs_block_group_cache *cache;
94
95         spin_lock(&info->block_group_cache_lock);
96         p = &info->block_group_cache_tree.rb_node;
97
98         while (*p) {
99                 parent = *p;
100                 cache = rb_entry(parent, struct btrfs_block_group_cache,
101                                  cache_node);
102                 if (block_group->key.objectid < cache->key.objectid) {
103                         p = &(*p)->rb_left;
104                 } else if (block_group->key.objectid > cache->key.objectid) {
105                         p = &(*p)->rb_right;
106                 } else {
107                         spin_unlock(&info->block_group_cache_lock);
108                         return -EEXIST;
109                 }
110         }
111
112         rb_link_node(&block_group->cache_node, parent, p);
113         rb_insert_color(&block_group->cache_node,
114                         &info->block_group_cache_tree);
115         spin_unlock(&info->block_group_cache_lock);
116
117         return 0;
118 }
119
120 /*
121  * This will return the block group at or after bytenr if contains is 0, else
122  * it will return the block group that contains the bytenr
123  */
124 static struct btrfs_block_group_cache *
125 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
126                               int contains)
127 {
128         struct btrfs_block_group_cache *cache, *ret = NULL;
129         struct rb_node *n;
130         u64 end, start;
131
132         spin_lock(&info->block_group_cache_lock);
133         n = info->block_group_cache_tree.rb_node;
134
135         while (n) {
136                 cache = rb_entry(n, struct btrfs_block_group_cache,
137                                  cache_node);
138                 end = cache->key.objectid + cache->key.offset - 1;
139                 start = cache->key.objectid;
140
141                 if (bytenr < start) {
142                         if (!contains && (!ret || start < ret->key.objectid))
143                                 ret = cache;
144                         n = n->rb_left;
145                 } else if (bytenr > start) {
146                         if (contains && bytenr <= end) {
147                                 ret = cache;
148                                 break;
149                         }
150                         n = n->rb_right;
151                 } else {
152                         ret = cache;
153                         break;
154                 }
155         }
156         if (ret)
157                 atomic_inc(&ret->count);
158         spin_unlock(&info->block_group_cache_lock);
159
160         return ret;
161 }
162
163 static int add_excluded_extent(struct btrfs_root *root,
164                                u64 start, u64 num_bytes)
165 {
166         u64 end = start + num_bytes - 1;
167         set_extent_bits(&root->fs_info->freed_extents[0],
168                         start, end, EXTENT_UPTODATE, GFP_NOFS);
169         set_extent_bits(&root->fs_info->freed_extents[1],
170                         start, end, EXTENT_UPTODATE, GFP_NOFS);
171         return 0;
172 }
173
174 static void free_excluded_extents(struct btrfs_root *root,
175                                   struct btrfs_block_group_cache *cache)
176 {
177         u64 start, end;
178
179         start = cache->key.objectid;
180         end = start + cache->key.offset - 1;
181
182         clear_extent_bits(&root->fs_info->freed_extents[0],
183                           start, end, EXTENT_UPTODATE, GFP_NOFS);
184         clear_extent_bits(&root->fs_info->freed_extents[1],
185                           start, end, EXTENT_UPTODATE, GFP_NOFS);
186 }
187
188 static int exclude_super_stripes(struct btrfs_root *root,
189                                  struct btrfs_block_group_cache *cache)
190 {
191         u64 bytenr;
192         u64 *logical;
193         int stripe_len;
194         int i, nr, ret;
195
196         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
197                 bytenr = btrfs_sb_offset(i);
198                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
199                                        cache->key.objectid, bytenr,
200                                        0, &logical, &nr, &stripe_len);
201                 BUG_ON(ret);
202
203                 while (nr--) {
204                         ret = add_excluded_extent(root, logical[nr],
205                                                   stripe_len);
206                         BUG_ON(ret);
207                 }
208
209                 kfree(logical);
210         }
211         return 0;
212 }
213
214 static struct btrfs_caching_control *
215 get_caching_control(struct btrfs_block_group_cache *cache)
216 {
217         struct btrfs_caching_control *ctl;
218
219         spin_lock(&cache->lock);
220         if (cache->cached != BTRFS_CACHE_STARTED) {
221                 spin_unlock(&cache->lock);
222                 return NULL;
223         }
224
225         ctl = cache->caching_ctl;
226         atomic_inc(&ctl->count);
227         spin_unlock(&cache->lock);
228         return ctl;
229 }
230
231 static void put_caching_control(struct btrfs_caching_control *ctl)
232 {
233         if (atomic_dec_and_test(&ctl->count))
234                 kfree(ctl);
235 }
236
237 /*
238  * This is only called by cache_block_group.  Since we could have freed extents,
239  * we need to check the pinned_extents for any extents that can't be used yet,
240  * since their free space will be released as soon as the transaction commits.
241  */
242 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
243                               struct btrfs_fs_info *info, u64 start, u64 end)
244 {
245         u64 extent_start, extent_end, size, total_added = 0;
246         int ret;
247
248         while (start < end) {
249                 ret = find_first_extent_bit(info->pinned_extents, start,
250                                             &extent_start, &extent_end,
251                                             EXTENT_DIRTY | EXTENT_UPTODATE);
252                 if (ret)
253                         break;
254
255                 if (extent_start == start) {
256                         start = extent_end + 1;
257                 } else if (extent_start > start && extent_start < end) {
258                         size = extent_start - start;
259                         total_added += size;
260                         ret = btrfs_add_free_space(block_group, start,
261                                                    size);
262                         BUG_ON(ret);
263                         start = extent_end + 1;
264                 } else {
265                         break;
266                 }
267         }
268
269         if (start < end) {
270                 size = end - start;
271                 total_added += size;
272                 ret = btrfs_add_free_space(block_group, start, size);
273                 BUG_ON(ret);
274         }
275
276         return total_added;
277 }
278
279 static int caching_kthread(void *data)
280 {
281         struct btrfs_block_group_cache *block_group = data;
282         struct btrfs_fs_info *fs_info = block_group->fs_info;
283         struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
284         struct btrfs_root *extent_root = fs_info->extent_root;
285         struct btrfs_path *path;
286         struct extent_buffer *leaf;
287         struct btrfs_key key;
288         u64 total_found = 0;
289         u64 last = 0;
290         u32 nritems;
291         int ret = 0;
292
293         path = btrfs_alloc_path();
294         if (!path)
295                 return -ENOMEM;
296
297         exclude_super_stripes(extent_root, block_group);
298
299         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
300
301         /*
302          * We don't want to deadlock with somebody trying to allocate a new
303          * extent for the extent root while also trying to search the extent
304          * root to add free space.  So we skip locking and search the commit
305          * root, since it's read-only.
306          */
307         path->skip_locking = 1;
308         path->search_commit_root = 1;
309         path->reada = 2;
310
311         key.objectid = last;
312         key.offset = 0;
313         key.type = BTRFS_EXTENT_ITEM_KEY;
314 again:
315         mutex_lock(&caching_ctl->mutex);
316         /* need to make sure the commit_root doesn't disappear */
317         down_read(&fs_info->extent_commit_sem);
318
319         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
320         if (ret < 0)
321                 goto err;
322
323         leaf = path->nodes[0];
324         nritems = btrfs_header_nritems(leaf);
325
326         while (1) {
327                 smp_mb();
328                 if (fs_info->closing > 1) {
329                         last = (u64)-1;
330                         break;
331                 }
332
333                 if (path->slots[0] < nritems) {
334                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
335                 } else {
336                         ret = find_next_key(path, 0, &key);
337                         if (ret)
338                                 break;
339
340                         caching_ctl->progress = last;
341                         btrfs_release_path(extent_root, path);
342                         up_read(&fs_info->extent_commit_sem);
343                         mutex_unlock(&caching_ctl->mutex);
344                         if (btrfs_transaction_in_commit(fs_info))
345                                 schedule_timeout(1);
346                         else
347                                 cond_resched();
348                         goto again;
349                 }
350
351                 if (key.objectid < block_group->key.objectid) {
352                         path->slots[0]++;
353                         continue;
354                 }
355
356                 if (key.objectid >= block_group->key.objectid +
357                     block_group->key.offset)
358                         break;
359
360                 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
361                         total_found += add_new_free_space(block_group,
362                                                           fs_info, last,
363                                                           key.objectid);
364                         last = key.objectid + key.offset;
365
366                         if (total_found > (1024 * 1024 * 2)) {
367                                 total_found = 0;
368                                 wake_up(&caching_ctl->wait);
369                         }
370                 }
371                 path->slots[0]++;
372         }
373         ret = 0;
374
375         total_found += add_new_free_space(block_group, fs_info, last,
376                                           block_group->key.objectid +
377                                           block_group->key.offset);
378         caching_ctl->progress = (u64)-1;
379
380         spin_lock(&block_group->lock);
381         block_group->caching_ctl = NULL;
382         block_group->cached = BTRFS_CACHE_FINISHED;
383         spin_unlock(&block_group->lock);
384
385 err:
386         btrfs_free_path(path);
387         up_read(&fs_info->extent_commit_sem);
388
389         free_excluded_extents(extent_root, block_group);
390
391         mutex_unlock(&caching_ctl->mutex);
392         wake_up(&caching_ctl->wait);
393
394         put_caching_control(caching_ctl);
395         atomic_dec(&block_group->space_info->caching_threads);
396         return 0;
397 }
398
399 static int cache_block_group(struct btrfs_block_group_cache *cache)
400 {
401         struct btrfs_fs_info *fs_info = cache->fs_info;
402         struct btrfs_caching_control *caching_ctl;
403         struct task_struct *tsk;
404         int ret = 0;
405
406         smp_mb();
407         if (cache->cached != BTRFS_CACHE_NO)
408                 return 0;
409
410         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
411         BUG_ON(!caching_ctl);
412
413         INIT_LIST_HEAD(&caching_ctl->list);
414         mutex_init(&caching_ctl->mutex);
415         init_waitqueue_head(&caching_ctl->wait);
416         caching_ctl->block_group = cache;
417         caching_ctl->progress = cache->key.objectid;
418         /* one for caching kthread, one for caching block group list */
419         atomic_set(&caching_ctl->count, 2);
420
421         spin_lock(&cache->lock);
422         if (cache->cached != BTRFS_CACHE_NO) {
423                 spin_unlock(&cache->lock);
424                 kfree(caching_ctl);
425                 return 0;
426         }
427         cache->caching_ctl = caching_ctl;
428         cache->cached = BTRFS_CACHE_STARTED;
429         spin_unlock(&cache->lock);
430
431         down_write(&fs_info->extent_commit_sem);
432         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
433         up_write(&fs_info->extent_commit_sem);
434
435         atomic_inc(&cache->space_info->caching_threads);
436
437         tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
438                           cache->key.objectid);
439         if (IS_ERR(tsk)) {
440                 ret = PTR_ERR(tsk);
441                 printk(KERN_ERR "error running thread %d\n", ret);
442                 BUG();
443         }
444
445         return ret;
446 }
447
448 /*
449  * return the block group that starts at or after bytenr
450  */
451 static struct btrfs_block_group_cache *
452 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
453 {
454         struct btrfs_block_group_cache *cache;
455
456         cache = block_group_cache_tree_search(info, bytenr, 0);
457
458         return cache;
459 }
460
461 /*
462  * return the block group that contains the given bytenr
463  */
464 struct btrfs_block_group_cache *btrfs_lookup_block_group(
465                                                  struct btrfs_fs_info *info,
466                                                  u64 bytenr)
467 {
468         struct btrfs_block_group_cache *cache;
469
470         cache = block_group_cache_tree_search(info, bytenr, 1);
471
472         return cache;
473 }
474
475 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
476 {
477         if (atomic_dec_and_test(&cache->count))
478                 kfree(cache);
479 }
480
481 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
482                                                   u64 flags)
483 {
484         struct list_head *head = &info->space_info;
485         struct btrfs_space_info *found;
486
487         rcu_read_lock();
488         list_for_each_entry_rcu(found, head, list) {
489                 if (found->flags == flags) {
490                         rcu_read_unlock();
491                         return found;
492                 }
493         }
494         rcu_read_unlock();
495         return NULL;
496 }
497
498 /*
499  * after adding space to the filesystem, we need to clear the full flags
500  * on all the space infos.
501  */
502 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
503 {
504         struct list_head *head = &info->space_info;
505         struct btrfs_space_info *found;
506
507         rcu_read_lock();
508         list_for_each_entry_rcu(found, head, list)
509                 found->full = 0;
510         rcu_read_unlock();
511 }
512
513 static u64 div_factor(u64 num, int factor)
514 {
515         if (factor == 10)
516                 return num;
517         num *= factor;
518         do_div(num, 10);
519         return num;
520 }
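/*
 * Editor's note (illustrative, not from the original source): with
 * hypothetical values, div_factor(1024, 9) == 921 (about 90% of 1024,
 * truncated by do_div), and div_factor(1024, 10) == 1024 since a factor
 * of 10 short-circuits to num.  btrfs_find_block_group() below uses this
 * result as a fullness threshold for block groups.
 */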
521
522 u64 btrfs_find_block_group(struct btrfs_root *root,
523                            u64 search_start, u64 search_hint, int owner)
524 {
525         struct btrfs_block_group_cache *cache;
526         u64 used;
527         u64 last = max(search_hint, search_start);
528         u64 group_start = 0;
529         int full_search = 0;
530         int factor = 9;
531         int wrapped = 0;
532 again:
533         while (1) {
534                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
535                 if (!cache)
536                         break;
537
538                 spin_lock(&cache->lock);
539                 last = cache->key.objectid + cache->key.offset;
540                 used = btrfs_block_group_used(&cache->item);
541
542                 if ((full_search || !cache->ro) &&
543                     block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
544                         if (used + cache->pinned + cache->reserved <
545                             div_factor(cache->key.offset, factor)) {
546                                 group_start = cache->key.objectid;
547                                 spin_unlock(&cache->lock);
548                                 btrfs_put_block_group(cache);
549                                 goto found;
550                         }
551                 }
552                 spin_unlock(&cache->lock);
553                 btrfs_put_block_group(cache);
554                 cond_resched();
555         }
556         if (!wrapped) {
557                 last = search_start;
558                 wrapped = 1;
559                 goto again;
560         }
561         if (!full_search && factor < 10) {
562                 last = search_start;
563                 full_search = 1;
564                 factor = 10;
565                 goto again;
566         }
567 found:
568         return group_start;
569 }
570
571 /* simple helper to search for an existing extent at a given offset */
572 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
573 {
574         int ret;
575         struct btrfs_key key;
576         struct btrfs_path *path;
577
578         path = btrfs_alloc_path();
579         BUG_ON(!path);
580         key.objectid = start;
581         key.offset = len;
582         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
583         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
584                                 0, 0);
585         btrfs_free_path(path);
586         return ret;
587 }
588
589 /*
590  * Back reference rules.  Back refs have three main goals:
591  *
592  * 1) differentiate between all holders of references to an extent so that
593  *    when a reference is dropped we can make sure it was a valid reference
594  *    before freeing the extent.
595  *
596  * 2) Provide enough information to quickly find the holders of an extent
597  *    if we notice a given block is corrupted or bad.
598  *
599  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
600  *    maintenance.  This is actually the same as #2, but with a slightly
601  *    different use case.
602  *
603  * There are two kinds of back refs. Implicit back refs are optimized
604  * for pointers in non-shared tree blocks. For a given pointer in a block,
605  * back refs of this kind provide information about the block's owner tree
606  * and the pointer's key. This information allows us to find the block by
607  * b-tree searching. Full back refs are for pointers in tree blocks not
608  * referenced by their owner trees. The location of the tree block is recorded
609  * in the back refs. The full back refs are actually generic and can be
610  * used in all cases where implicit back refs are used. The major shortcoming
611  * of full back refs is their overhead: every time a tree block gets
612  * COWed, we have to update the back ref entries for all pointers in it.
613  *
614  * For a newly allocated tree block, we use implicit back refs for
615  * pointers in it. This means most tree-related operations only involve
616  * implicit back refs. For a tree block created in an old transaction, the
617  * only way to drop a reference to it is to COW it. So we can detect the
618  * event that a tree block loses its owner tree's reference and do the
619  * back ref conversion.
620  *
621  * When a tree block is COW'd through a tree, there are four cases:
622  *
623  * The reference count of the block is one and the tree is the block's
624  * owner tree. Nothing to do in this case.
625  *
626  * The reference count of the block is one and the tree is not the
627  * block's owner tree. In this case, full back refs are used for pointers
628  * in the block. Remove these full back refs and add implicit back refs for
629  * every pointer in the new block.
630  *
631  * The reference count of the block is greater than one and the tree is
632  * the block's owner tree. In this case, implicit back refs are used for
633  * pointers in the block. Add full back refs for every pointer in the
634  * block and increase the lower level extents' reference counts. The original
635  * implicit back refs are carried over to the new block.
636  *
637  * The reference count of the block is greater than one and the tree is
638  * not the block's owner tree. Add implicit back refs for every pointer in
639  * the new block and increase the lower level extents' reference counts.
640  *
641  * Back Reference Key composing:
642  *
643  * The key objectid corresponds to the first byte in the extent, and
644  * the key type is used to differentiate between types of back refs.
645  * The key offset has different meanings for different types
646  * of back refs.
647  *
648  * File extents can be referenced by:
649  *
650  * - multiple snapshots, subvolumes, or different generations in one subvol
651  * - different files inside a single subvolume
652  * - different offsets inside a file (bookend extents in file.c)
653  *
654  * The extent ref structure for the implicit back refs has fields for:
655  *
656  * - objectid of the subvolume root
657  * - objectid of the file holding the reference
658  * - original offset in the file
659  * - how many bookend extents
660  *
661  * The key offset for the implicit back refs is the hash of the first
662  * three fields.
663  *
664  * The extent ref structure for the full back refs has a field for:
665  *
666  * - number of pointers in the tree leaf
667  *
668  * The key offset for the full back refs is the first byte of
669  * the tree leaf.
670  *
671  * When a file extent is allocated, the implicit back refs are used
672  * and the fields are filled in:
673  *
674  *     (root_key.objectid, inode objectid, offset in file, 1)
675  *
676  * When a file extent is removed by file truncation, we find the
677  * corresponding implicit back refs and check the following fields:
678  *
679  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
680  *
681  * Btree extents can be referenced by:
682  *
683  * - Different subvolumes
684  *
685  * Both the implicit back refs and the full back refs for tree blocks
686  * only consist of a key. The key offset for the implicit back refs is the
687  * objectid of the block's owner tree. The key offset for the full back refs
688  * is the first byte of the parent block.
689  *
690  * When implicit back refs are used, information about the lowest key and
691  * level of the tree block is required. This information is stored in
692  * the tree block info structure.
693  */
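/*
 * Editor's note: a sketch of the key composition described above, using
 * hypothetical values (subvolume root 5, inode 257, file offset 0) rather
 * than anything taken from the original source.
 *
 * Implicit data back ref for a file extent starting at bytenr B:
 *
 *     (objectid = B, type = BTRFS_EXTENT_DATA_REF_KEY,
 *      offset = hash_extent_data_ref(5, 257, 0))
 *
 * Full (shared) data back ref for the same extent, referenced from a leaf
 * whose first byte is P:
 *
 *     (objectid = B, type = BTRFS_SHARED_DATA_REF_KEY, offset = P)
 */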
694
695 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
696 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
697                                   struct btrfs_root *root,
698                                   struct btrfs_path *path,
699                                   u64 owner, u32 extra_size)
700 {
701         struct btrfs_extent_item *item;
702         struct btrfs_extent_item_v0 *ei0;
703         struct btrfs_extent_ref_v0 *ref0;
704         struct btrfs_tree_block_info *bi;
705         struct extent_buffer *leaf;
706         struct btrfs_key key;
707         struct btrfs_key found_key;
708         u32 new_size = sizeof(*item);
709         u64 refs;
710         int ret;
711
712         leaf = path->nodes[0];
713         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
714
715         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
716         ei0 = btrfs_item_ptr(leaf, path->slots[0],
717                              struct btrfs_extent_item_v0);
718         refs = btrfs_extent_refs_v0(leaf, ei0);
719
720         if (owner == (u64)-1) {
721                 while (1) {
722                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
723                                 ret = btrfs_next_leaf(root, path);
724                                 if (ret < 0)
725                                         return ret;
726                                 BUG_ON(ret > 0);
727                                 leaf = path->nodes[0];
728                         }
729                         btrfs_item_key_to_cpu(leaf, &found_key,
730                                               path->slots[0]);
731                         BUG_ON(key.objectid != found_key.objectid);
732                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
733                                 path->slots[0]++;
734                                 continue;
735                         }
736                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
737                                               struct btrfs_extent_ref_v0);
738                         owner = btrfs_ref_objectid_v0(leaf, ref0);
739                         break;
740                 }
741         }
742         btrfs_release_path(root, path);
743
744         if (owner < BTRFS_FIRST_FREE_OBJECTID)
745                 new_size += sizeof(*bi);
746
747         new_size -= sizeof(*ei0);
748         ret = btrfs_search_slot(trans, root, &key, path,
749                                 new_size + extra_size, 1);
750         if (ret < 0)
751                 return ret;
752         BUG_ON(ret);
753
754         ret = btrfs_extend_item(trans, root, path, new_size);
755         BUG_ON(ret);
756
757         leaf = path->nodes[0];
758         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
759         btrfs_set_extent_refs(leaf, item, refs);
760         /* FIXME: get real generation */
761         btrfs_set_extent_generation(leaf, item, 0);
762         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
763                 btrfs_set_extent_flags(leaf, item,
764                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
765                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
766                 bi = (struct btrfs_tree_block_info *)(item + 1);
767                 /* FIXME: get first key of the block */
768                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
769                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
770         } else {
771                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
772         }
773         btrfs_mark_buffer_dirty(leaf);
774         return 0;
775 }
776 #endif
777
778 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
779 {
780         u32 high_crc = ~(u32)0;
781         u32 low_crc = ~(u32)0;
782         __le64 lenum;
783
784         lenum = cpu_to_le64(root_objectid);
785         high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
786         lenum = cpu_to_le64(owner);
787         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
788         lenum = cpu_to_le64(offset);
789         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
790
791         return ((u64)high_crc << 31) ^ (u64)low_crc;
792 }
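/*
 * Editor's note (cross-reference, not from the original source): this hash
 * becomes the key offset of BTRFS_EXTENT_DATA_REF_KEY items.  Collisions are
 * possible, which is why insert_extent_data_ref() below bumps key.offset and
 * retries until it finds a matching ref or an empty slot.
 */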
793
794 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
795                                      struct btrfs_extent_data_ref *ref)
796 {
797         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
798                                     btrfs_extent_data_ref_objectid(leaf, ref),
799                                     btrfs_extent_data_ref_offset(leaf, ref));
800 }
801
802 static int match_extent_data_ref(struct extent_buffer *leaf,
803                                  struct btrfs_extent_data_ref *ref,
804                                  u64 root_objectid, u64 owner, u64 offset)
805 {
806         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
807             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
808             btrfs_extent_data_ref_offset(leaf, ref) != offset)
809                 return 0;
810         return 1;
811 }
812
813 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
814                                            struct btrfs_root *root,
815                                            struct btrfs_path *path,
816                                            u64 bytenr, u64 parent,
817                                            u64 root_objectid,
818                                            u64 owner, u64 offset)
819 {
820         struct btrfs_key key;
821         struct btrfs_extent_data_ref *ref;
822         struct extent_buffer *leaf;
823         u32 nritems;
824         int ret;
825         int recow;
826         int err = -ENOENT;
827
828         key.objectid = bytenr;
829         if (parent) {
830                 key.type = BTRFS_SHARED_DATA_REF_KEY;
831                 key.offset = parent;
832         } else {
833                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
834                 key.offset = hash_extent_data_ref(root_objectid,
835                                                   owner, offset);
836         }
837 again:
838         recow = 0;
839         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
840         if (ret < 0) {
841                 err = ret;
842                 goto fail;
843         }
844
845         if (parent) {
846                 if (!ret)
847                         return 0;
848 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
849                 key.type = BTRFS_EXTENT_REF_V0_KEY;
850                 btrfs_release_path(root, path);
851                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
852                 if (ret < 0) {
853                         err = ret;
854                         goto fail;
855                 }
856                 if (!ret)
857                         return 0;
858 #endif
859                 goto fail;
860         }
861
862         leaf = path->nodes[0];
863         nritems = btrfs_header_nritems(leaf);
864         while (1) {
865                 if (path->slots[0] >= nritems) {
866                         ret = btrfs_next_leaf(root, path);
867                         if (ret < 0)
868                                 err = ret;
869                         if (ret)
870                                 goto fail;
871
872                         leaf = path->nodes[0];
873                         nritems = btrfs_header_nritems(leaf);
874                         recow = 1;
875                 }
876
877                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
878                 if (key.objectid != bytenr ||
879                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
880                         goto fail;
881
882                 ref = btrfs_item_ptr(leaf, path->slots[0],
883                                      struct btrfs_extent_data_ref);
884
885                 if (match_extent_data_ref(leaf, ref, root_objectid,
886                                           owner, offset)) {
887                         if (recow) {
888                                 btrfs_release_path(root, path);
889                                 goto again;
890                         }
891                         err = 0;
892                         break;
893                 }
894                 path->slots[0]++;
895         }
896 fail:
897         return err;
898 }
899
900 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
901                                            struct btrfs_root *root,
902                                            struct btrfs_path *path,
903                                            u64 bytenr, u64 parent,
904                                            u64 root_objectid, u64 owner,
905                                            u64 offset, int refs_to_add)
906 {
907         struct btrfs_key key;
908         struct extent_buffer *leaf;
909         u32 size;
910         u32 num_refs;
911         int ret;
912
913         key.objectid = bytenr;
914         if (parent) {
915                 key.type = BTRFS_SHARED_DATA_REF_KEY;
916                 key.offset = parent;
917                 size = sizeof(struct btrfs_shared_data_ref);
918         } else {
919                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
920                 key.offset = hash_extent_data_ref(root_objectid,
921                                                   owner, offset);
922                 size = sizeof(struct btrfs_extent_data_ref);
923         }
924
925         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
926         if (ret && ret != -EEXIST)
927                 goto fail;
928
929         leaf = path->nodes[0];
930         if (parent) {
931                 struct btrfs_shared_data_ref *ref;
932                 ref = btrfs_item_ptr(leaf, path->slots[0],
933                                      struct btrfs_shared_data_ref);
934                 if (ret == 0) {
935                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
936                 } else {
937                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
938                         num_refs += refs_to_add;
939                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
940                 }
941         } else {
942                 struct btrfs_extent_data_ref *ref;
943                 while (ret == -EEXIST) {
944                         ref = btrfs_item_ptr(leaf, path->slots[0],
945                                              struct btrfs_extent_data_ref);
946                         if (match_extent_data_ref(leaf, ref, root_objectid,
947                                                   owner, offset))
948                                 break;
949                         btrfs_release_path(root, path);
950                         key.offset++;
951                         ret = btrfs_insert_empty_item(trans, root, path, &key,
952                                                       size);
953                         if (ret && ret != -EEXIST)
954                                 goto fail;
955
956                         leaf = path->nodes[0];
957                 }
958                 ref = btrfs_item_ptr(leaf, path->slots[0],
959                                      struct btrfs_extent_data_ref);
960                 if (ret == 0) {
961                         btrfs_set_extent_data_ref_root(leaf, ref,
962                                                        root_objectid);
963                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
964                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
965                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
966                 } else {
967                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
968                         num_refs += refs_to_add;
969                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
970                 }
971         }
972         btrfs_mark_buffer_dirty(leaf);
973         ret = 0;
974 fail:
975         btrfs_release_path(root, path);
976         return ret;
977 }
978
979 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
980                                            struct btrfs_root *root,
981                                            struct btrfs_path *path,
982                                            int refs_to_drop)
983 {
984         struct btrfs_key key;
985         struct btrfs_extent_data_ref *ref1 = NULL;
986         struct btrfs_shared_data_ref *ref2 = NULL;
987         struct extent_buffer *leaf;
988         u32 num_refs = 0;
989         int ret = 0;
990
991         leaf = path->nodes[0];
992         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
993
994         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
995                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
996                                       struct btrfs_extent_data_ref);
997                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
998         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
999                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1000                                       struct btrfs_shared_data_ref);
1001                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1002 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1003         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1004                 struct btrfs_extent_ref_v0 *ref0;
1005                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1006                                       struct btrfs_extent_ref_v0);
1007                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1008 #endif
1009         } else {
1010                 BUG();
1011         }
1012
1013         BUG_ON(num_refs < refs_to_drop);
1014         num_refs -= refs_to_drop;
1015
1016         if (num_refs == 0) {
1017                 ret = btrfs_del_item(trans, root, path);
1018         } else {
1019                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1020                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1021                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1022                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1023 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1024                 else {
1025                         struct btrfs_extent_ref_v0 *ref0;
1026                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1027                                         struct btrfs_extent_ref_v0);
1028                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1029                 }
1030 #endif
1031                 btrfs_mark_buffer_dirty(leaf);
1032         }
1033         return ret;
1034 }
1035
1036 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1037                                           struct btrfs_path *path,
1038                                           struct btrfs_extent_inline_ref *iref)
1039 {
1040         struct btrfs_key key;
1041         struct extent_buffer *leaf;
1042         struct btrfs_extent_data_ref *ref1;
1043         struct btrfs_shared_data_ref *ref2;
1044         u32 num_refs = 0;
1045
1046         leaf = path->nodes[0];
1047         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1048         if (iref) {
1049                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1050                     BTRFS_EXTENT_DATA_REF_KEY) {
1051                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1052                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1053                 } else {
1054                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1055                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1056                 }
1057         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1058                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1059                                       struct btrfs_extent_data_ref);
1060                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1061         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1062                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1063                                       struct btrfs_shared_data_ref);
1064                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1065 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1066         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1067                 struct btrfs_extent_ref_v0 *ref0;
1068                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1069                                       struct btrfs_extent_ref_v0);
1070                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1071 #endif
1072         } else {
1073                 WARN_ON(1);
1074         }
1075         return num_refs;
1076 }
1077
1078 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1079                                           struct btrfs_root *root,
1080                                           struct btrfs_path *path,
1081                                           u64 bytenr, u64 parent,
1082                                           u64 root_objectid)
1083 {
1084         struct btrfs_key key;
1085         int ret;
1086
1087         key.objectid = bytenr;
1088         if (parent) {
1089                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1090                 key.offset = parent;
1091         } else {
1092                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1093                 key.offset = root_objectid;
1094         }
1095
1096         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1097         if (ret > 0)
1098                 ret = -ENOENT;
1099 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1100         if (ret == -ENOENT && parent) {
1101                 btrfs_release_path(root, path);
1102                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1103                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1104                 if (ret > 0)
1105                         ret = -ENOENT;
1106         }
1107 #endif
1108         return ret;
1109 }
1110
1111 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1112                                           struct btrfs_root *root,
1113                                           struct btrfs_path *path,
1114                                           u64 bytenr, u64 parent,
1115                                           u64 root_objectid)
1116 {
1117         struct btrfs_key key;
1118         int ret;
1119
1120         key.objectid = bytenr;
1121         if (parent) {
1122                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1123                 key.offset = parent;
1124         } else {
1125                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1126                 key.offset = root_objectid;
1127         }
1128
1129         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1130         btrfs_release_path(root, path);
1131         return ret;
1132 }
1133
1134 static inline int extent_ref_type(u64 parent, u64 owner)
1135 {
1136         int type;
1137         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1138                 if (parent > 0)
1139                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1140                 else
1141                         type = BTRFS_TREE_BLOCK_REF_KEY;
1142         } else {
1143                 if (parent > 0)
1144                         type = BTRFS_SHARED_DATA_REF_KEY;
1145                 else
1146                         type = BTRFS_EXTENT_DATA_REF_KEY;
1147         }
1148         return type;
1149 }
1150
1151 static int find_next_key(struct btrfs_path *path, int level,
1152                          struct btrfs_key *key)
1153
1154 {
1155         for (; level < BTRFS_MAX_LEVEL; level++) {
1156                 if (!path->nodes[level])
1157                         break;
1158                 if (path->slots[level] + 1 >=
1159                     btrfs_header_nritems(path->nodes[level]))
1160                         continue;
1161                 if (level == 0)
1162                         btrfs_item_key_to_cpu(path->nodes[level], key,
1163                                               path->slots[level] + 1);
1164                 else
1165                         btrfs_node_key_to_cpu(path->nodes[level], key,
1166                                               path->slots[level] + 1);
1167                 return 0;
1168         }
1169         return 1;
1170 }
1171
1172 /*
1173  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1174  * to the address of the inline back ref, and 0 is returned.
1175  *
1176  * If the back ref isn't found, *ref_ret is set to the address where it
1177  * should be inserted, and -ENOENT is returned.
1178  *
1179  * If insert is true and there are too many inline back refs, the path
1180  * points to the extent item, and -EAGAIN is returned.
1181  *
1182  * NOTE: inline back refs are ordered in the same way that back ref
1183  *       items in the tree are ordered.
1184  */
1185 static noinline_for_stack
1186 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1187                                  struct btrfs_root *root,
1188                                  struct btrfs_path *path,
1189                                  struct btrfs_extent_inline_ref **ref_ret,
1190                                  u64 bytenr, u64 num_bytes,
1191                                  u64 parent, u64 root_objectid,
1192                                  u64 owner, u64 offset, int insert)
1193 {
1194         struct btrfs_key key;
1195         struct extent_buffer *leaf;
1196         struct btrfs_extent_item *ei;
1197         struct btrfs_extent_inline_ref *iref;
1198         u64 flags;
1199         u64 item_size;
1200         unsigned long ptr;
1201         unsigned long end;
1202         int extra_size;
1203         int type;
1204         int want;
1205         int ret;
1206         int err = 0;
1207
1208         key.objectid = bytenr;
1209         key.type = BTRFS_EXTENT_ITEM_KEY;
1210         key.offset = num_bytes;
1211
1212         want = extent_ref_type(parent, owner);
1213         if (insert) {
1214                 extra_size = btrfs_extent_inline_ref_size(want);
1215                 path->keep_locks = 1;
1216         } else
1217                 extra_size = -1;
1218         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1219         if (ret < 0) {
1220                 err = ret;
1221                 goto out;
1222         }
1223         BUG_ON(ret);
1224
1225         leaf = path->nodes[0];
1226         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1227 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1228         if (item_size < sizeof(*ei)) {
1229                 if (!insert) {
1230                         err = -ENOENT;
1231                         goto out;
1232                 }
1233                 ret = convert_extent_item_v0(trans, root, path, owner,
1234                                              extra_size);
1235                 if (ret < 0) {
1236                         err = ret;
1237                         goto out;
1238                 }
1239                 leaf = path->nodes[0];
1240                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1241         }
1242 #endif
1243         BUG_ON(item_size < sizeof(*ei));
1244
1245         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1246         flags = btrfs_extent_flags(leaf, ei);
1247
1248         ptr = (unsigned long)(ei + 1);
1249         end = (unsigned long)ei + item_size;
1250
1251         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1252                 ptr += sizeof(struct btrfs_tree_block_info);
1253                 BUG_ON(ptr > end);
1254         } else {
1255                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1256         }
1257
1258         err = -ENOENT;
1259         while (1) {
1260                 if (ptr >= end) {
1261                         WARN_ON(ptr > end);
1262                         break;
1263                 }
1264                 iref = (struct btrfs_extent_inline_ref *)ptr;
1265                 type = btrfs_extent_inline_ref_type(leaf, iref);
1266                 if (want < type)
1267                         break;
1268                 if (want > type) {
1269                         ptr += btrfs_extent_inline_ref_size(type);
1270                         continue;
1271                 }
1272
1273                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1274                         struct btrfs_extent_data_ref *dref;
1275                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1276                         if (match_extent_data_ref(leaf, dref, root_objectid,
1277                                                   owner, offset)) {
1278                                 err = 0;
1279                                 break;
1280                         }
1281                         if (hash_extent_data_ref_item(leaf, dref) <
1282                             hash_extent_data_ref(root_objectid, owner, offset))
1283                                 break;
1284                 } else {
1285                         u64 ref_offset;
1286                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1287                         if (parent > 0) {
1288                                 if (parent == ref_offset) {
1289                                         err = 0;
1290                                         break;
1291                                 }
1292                                 if (ref_offset < parent)
1293                                         break;
1294                         } else {
1295                                 if (root_objectid == ref_offset) {
1296                                         err = 0;
1297                                         break;
1298                                 }
1299                                 if (ref_offset < root_objectid)
1300                                         break;
1301                         }
1302                 }
1303                 ptr += btrfs_extent_inline_ref_size(type);
1304         }
1305         if (err == -ENOENT && insert) {
1306                 if (item_size + extra_size >=
1307                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1308                         err = -EAGAIN;
1309                         goto out;
1310                 }
1311                 /*
1312                  * To add a new inline back ref, we have to make sure
1313                  * there is no corresponding back ref item.
1314                  * For simplicity, we just do not add a new inline back
1315                  * ref if there is any kind of item for this block.
1316                  */
1317                 if (find_next_key(path, 0, &key) == 0 &&
1318                     key.objectid == bytenr &&
1319                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1320                         err = -EAGAIN;
1321                         goto out;
1322                 }
1323         }
1324         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1325 out:
1326         if (insert) {
1327                 path->keep_locks = 0;
1328                 btrfs_unlock_up_safe(path, 1);
1329         }
1330         return err;
1331 }
1332
1333 /*
1334  * helper to add new inline back ref
1335  */
1336 static noinline_for_stack
1337 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1338                                 struct btrfs_root *root,
1339                                 struct btrfs_path *path,
1340                                 struct btrfs_extent_inline_ref *iref,
1341                                 u64 parent, u64 root_objectid,
1342                                 u64 owner, u64 offset, int refs_to_add,
1343                                 struct btrfs_delayed_extent_op *extent_op)
1344 {
1345         struct extent_buffer *leaf;
1346         struct btrfs_extent_item *ei;
1347         unsigned long ptr;
1348         unsigned long end;
1349         unsigned long item_offset;
1350         u64 refs;
1351         int size;
1352         int type;
1353         int ret;
1354
1355         leaf = path->nodes[0];
1356         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1357         item_offset = (unsigned long)iref - (unsigned long)ei;
1358
1359         type = extent_ref_type(parent, owner);
1360         size = btrfs_extent_inline_ref_size(type);
1361
1362         ret = btrfs_extend_item(trans, root, path, size);
1363         BUG_ON(ret);
1364
1365         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1366         refs = btrfs_extent_refs(leaf, ei);
1367         refs += refs_to_add;
1368         btrfs_set_extent_refs(leaf, ei, refs);
1369         if (extent_op)
1370                 __run_delayed_extent_op(extent_op, leaf, ei);
1371
1372         ptr = (unsigned long)ei + item_offset;
1373         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1374         if (ptr < end - size)
1375                 memmove_extent_buffer(leaf, ptr + size, ptr,
1376                                       end - size - ptr);
1377
1378         iref = (struct btrfs_extent_inline_ref *)ptr;
1379         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1380         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1381                 struct btrfs_extent_data_ref *dref;
1382                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1383                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1384                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1385                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1386                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1387         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1388                 struct btrfs_shared_data_ref *sref;
1389                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1390                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1391                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1392         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1393                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1394         } else {
1395                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1396         }
1397         btrfs_mark_buffer_dirty(leaf);
1398         return 0;
1399 }
1400
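/*
 * look up an existing back ref for an extent.  The inline back refs in
 * the extent item are tried first; if no matching inline ref is found,
 * *ref_ret is cleared and the keyed back ref item (tree block ref or
 * extent data ref) is looked up instead.
 */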
1401 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1402                                  struct btrfs_root *root,
1403                                  struct btrfs_path *path,
1404                                  struct btrfs_extent_inline_ref **ref_ret,
1405                                  u64 bytenr, u64 num_bytes, u64 parent,
1406                                  u64 root_objectid, u64 owner, u64 offset)
1407 {
1408         int ret;
1409
1410         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1411                                            bytenr, num_bytes, parent,
1412                                            root_objectid, owner, offset, 0);
1413         if (ret != -ENOENT)
1414                 return ret;
1415
1416         btrfs_release_path(root, path);
1417         *ref_ret = NULL;
1418
1419         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1420                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1421                                             root_objectid);
1422         } else {
1423                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1424                                              root_objectid, owner, offset);
1425         }
1426         return ret;
1427 }
1428
1429 /*
1430  * helper to update or remove an inline back ref
1431  */
1432 static noinline_for_stack
1433 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1434                                  struct btrfs_root *root,
1435                                  struct btrfs_path *path,
1436                                  struct btrfs_extent_inline_ref *iref,
1437                                  int refs_to_mod,
1438                                  struct btrfs_delayed_extent_op *extent_op)
1439 {
1440         struct extent_buffer *leaf;
1441         struct btrfs_extent_item *ei;
1442         struct btrfs_extent_data_ref *dref = NULL;
1443         struct btrfs_shared_data_ref *sref = NULL;
1444         unsigned long ptr;
1445         unsigned long end;
1446         u32 item_size;
1447         int size;
1448         int type;
1449         int ret;
1450         u64 refs;
1451
1452         leaf = path->nodes[0];
1453         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1454         refs = btrfs_extent_refs(leaf, ei);
1455         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1456         refs += refs_to_mod;
1457         btrfs_set_extent_refs(leaf, ei, refs);
1458         if (extent_op)
1459                 __run_delayed_extent_op(extent_op, leaf, ei);
1460
1461         type = btrfs_extent_inline_ref_type(leaf, iref);
1462
1463         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1464                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1465                 refs = btrfs_extent_data_ref_count(leaf, dref);
1466         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1467                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1468                 refs = btrfs_shared_data_ref_count(leaf, sref);
1469         } else {
1470                 refs = 1;
1471                 BUG_ON(refs_to_mod != -1);
1472         }
1473
1474         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1475         refs += refs_to_mod;
1476
1477         if (refs > 0) {
1478                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1479                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1480                 else
1481                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1482         } else {
1483                 size =  btrfs_extent_inline_ref_size(type);
1484                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1485                 ptr = (unsigned long)iref;
1486                 end = (unsigned long)ei + item_size;
1487                 if (ptr + size < end)
1488                         memmove_extent_buffer(leaf, ptr, ptr + size,
1489                                               end - ptr - size);
1490                 item_size -= size;
1491                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1492                 BUG_ON(ret);
1493         }
1494         btrfs_mark_buffer_dirty(leaf);
1495         return 0;
1496 }
1497
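/*
 * insert a new inline back ref, or bump the count on one that already
 * exists.  -EAGAIN from the lookup means a new inline ref cannot be
 * added here and the caller has to fall back to a keyed back ref item.
 */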
1498 static noinline_for_stack
1499 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1500                                  struct btrfs_root *root,
1501                                  struct btrfs_path *path,
1502                                  u64 bytenr, u64 num_bytes, u64 parent,
1503                                  u64 root_objectid, u64 owner,
1504                                  u64 offset, int refs_to_add,
1505                                  struct btrfs_delayed_extent_op *extent_op)
1506 {
1507         struct btrfs_extent_inline_ref *iref;
1508         int ret;
1509
1510         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1511                                            bytenr, num_bytes, parent,
1512                                            root_objectid, owner, offset, 1);
1513         if (ret == 0) {
1514                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1515                 ret = update_inline_extent_backref(trans, root, path, iref,
1516                                                    refs_to_add, extent_op);
1517         } else if (ret == -ENOENT) {
1518                 ret = setup_inline_extent_backref(trans, root, path, iref,
1519                                                   parent, root_objectid,
1520                                                   owner, offset, refs_to_add,
1521                                                   extent_op);
1522         }
1523         return ret;
1524 }
1525
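/*
 * insert a keyed (non-inline) back ref item: a tree block ref for
 * metadata or an extent data ref for file data.
 */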
1526 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1527                                  struct btrfs_root *root,
1528                                  struct btrfs_path *path,
1529                                  u64 bytenr, u64 parent, u64 root_objectid,
1530                                  u64 owner, u64 offset, int refs_to_add)
1531 {
1532         int ret;
1533         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1534                 BUG_ON(refs_to_add != 1);
1535                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1536                                             parent, root_objectid);
1537         } else {
1538                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1539                                              parent, root_objectid,
1540                                              owner, offset, refs_to_add);
1541         }
1542         return ret;
1543 }
1544
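/*
 * drop back refs for an extent.  Inline refs are updated (or removed)
 * in place, keyed data refs have their count decremented, and keyed
 * tree block refs are deleted outright.
 */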
1545 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1546                                  struct btrfs_root *root,
1547                                  struct btrfs_path *path,
1548                                  struct btrfs_extent_inline_ref *iref,
1549                                  int refs_to_drop, int is_data)
1550 {
1551         int ret;
1552
1553         BUG_ON(!is_data && refs_to_drop != 1);
1554         if (iref) {
1555                 ret = update_inline_extent_backref(trans, root, path, iref,
1556                                                    -refs_to_drop, NULL);
1557         } else if (is_data) {
1558                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1559         } else {
1560                 ret = btrfs_del_item(trans, root, path);
1561         }
1562         return ret;
1563 }
1564
1565 #ifdef BIO_RW_DISCARD
1566 static void btrfs_issue_discard(struct block_device *bdev,
1567                                 u64 start, u64 len)
1568 {
1569         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
1570 }
1571 #endif
1572
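/*
 * map [bytenr, bytenr + num_bytes) to the underlying devices and issue
 * a discard for each stripe.  This is a no-op on kernels without
 * BIO_RW_DISCARD.
 */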
1573 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1574                                 u64 num_bytes)
1575 {
1576 #ifdef BIO_RW_DISCARD
1577         int ret;
1578         u64 map_length = num_bytes;
1579         struct btrfs_multi_bio *multi = NULL;
1580
1581         /* Tell the block device(s) that the sectors can be discarded */
1582         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1583                               bytenr, &map_length, &multi, 0);
1584         if (!ret) {
1585                 struct btrfs_bio_stripe *stripe = multi->stripes;
1586                 int i;
1587
1588                 if (map_length > num_bytes)
1589                         map_length = num_bytes;
1590
1591                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1592                         btrfs_issue_discard(stripe->dev->bdev,
1593                                             stripe->physical,
1594                                             map_length);
1595                 }
1596                 kfree(multi);
1597         }
1598
1599         return ret;
1600 #else
1601         return 0;
1602 #endif
1603 }
1604
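/*
 * add a reference to an extent.  The extent tree is not touched here;
 * a delayed tree or data ref is queued and processed later by
 * btrfs_run_delayed_refs().
 */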
1605 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1606                          struct btrfs_root *root,
1607                          u64 bytenr, u64 num_bytes, u64 parent,
1608                          u64 root_objectid, u64 owner, u64 offset)
1609 {
1610         int ret;
1611         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1612                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1613
1614         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1615                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1616                                         parent, root_objectid, (int)owner,
1617                                         BTRFS_ADD_DELAYED_REF, NULL);
1618         } else {
1619                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1620                                         parent, root_objectid, owner, offset,
1621                                         BTRFS_ADD_DELAYED_REF, NULL);
1622         }
1623         return ret;
1624 }
1625
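/*
 * apply a delayed ref addition to the extent tree.  An inline back ref
 * is tried first; if that returns -EAGAIN, the ref count on the extent
 * item is bumped here and a separate keyed back ref item is inserted.
 */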
1626 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1627                                   struct btrfs_root *root,
1628                                   u64 bytenr, u64 num_bytes,
1629                                   u64 parent, u64 root_objectid,
1630                                   u64 owner, u64 offset, int refs_to_add,
1631                                   struct btrfs_delayed_extent_op *extent_op)
1632 {
1633         struct btrfs_path *path;
1634         struct extent_buffer *leaf;
1635         struct btrfs_extent_item *item;
1636         u64 refs;
1637         int ret;
1638         int err = 0;
1639
1640         path = btrfs_alloc_path();
1641         if (!path)
1642                 return -ENOMEM;
1643
1644         path->reada = 1;
1645         path->leave_spinning = 1;
1646         /* this will set up the path even if it fails to insert the back ref */
1647         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1648                                            path, bytenr, num_bytes, parent,
1649                                            root_objectid, owner, offset,
1650                                            refs_to_add, extent_op);
1651         if (ret == 0)
1652                 goto out;
1653
1654         if (ret != -EAGAIN) {
1655                 err = ret;
1656                 goto out;
1657         }
1658
1659         leaf = path->nodes[0];
1660         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1661         refs = btrfs_extent_refs(leaf, item);
1662         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1663         if (extent_op)
1664                 __run_delayed_extent_op(extent_op, leaf, item);
1665
1666         btrfs_mark_buffer_dirty(leaf);
1667         btrfs_release_path(root->fs_info->extent_root, path);
1668
1669         path->reada = 1;
1670         path->leave_spinning = 1;
1671
1672         /* now insert the actual backref */
1673         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1674                                     path, bytenr, parent, root_objectid,
1675                                     owner, offset, refs_to_add);
1676         BUG_ON(ret);
1677 out:
1678         btrfs_free_path(path);
1679         return err;
1680 }
1681
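/*
 * process a single delayed data ref.  A ref added for a freshly
 * reserved extent inserts the extent item itself; other additions and
 * drops adjust the back refs on an existing extent item.
 */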
1682 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1683                                 struct btrfs_root *root,
1684                                 struct btrfs_delayed_ref_node *node,
1685                                 struct btrfs_delayed_extent_op *extent_op,
1686                                 int insert_reserved)
1687 {
1688         int ret = 0;
1689         struct btrfs_delayed_data_ref *ref;
1690         struct btrfs_key ins;
1691         u64 parent = 0;
1692         u64 ref_root = 0;
1693         u64 flags = 0;
1694
1695         ins.objectid = node->bytenr;
1696         ins.offset = node->num_bytes;
1697         ins.type = BTRFS_EXTENT_ITEM_KEY;
1698
1699         ref = btrfs_delayed_node_to_data_ref(node);
1700         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1701                 parent = ref->parent;
1702         else
1703                 ref_root = ref->root;
1704
1705         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1706                 if (extent_op) {
1707                         BUG_ON(extent_op->update_key);
1708                         flags |= extent_op->flags_to_set;
1709                 }
1710                 ret = alloc_reserved_file_extent(trans, root,
1711                                                  parent, ref_root, flags,
1712                                                  ref->objectid, ref->offset,
1713                                                  &ins, node->ref_mod);
1714         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1715                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1716                                              node->num_bytes, parent,
1717                                              ref_root, ref->objectid,
1718                                              ref->offset, node->ref_mod,
1719                                              extent_op);
1720         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1721                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1722                                           node->num_bytes, parent,
1723                                           ref_root, ref->objectid,
1724                                           ref->offset, node->ref_mod,
1725                                           extent_op);
1726         } else {
1727                 BUG();
1728         }
1729         return ret;
1730 }
1731
1732 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1733                                     struct extent_buffer *leaf,
1734                                     struct btrfs_extent_item *ei)
1735 {
1736         u64 flags = btrfs_extent_flags(leaf, ei);
1737         if (extent_op->update_flags) {
1738                 flags |= extent_op->flags_to_set;
1739                 btrfs_set_extent_flags(leaf, ei, flags);
1740         }
1741
1742         if (extent_op->update_key) {
1743                 struct btrfs_tree_block_info *bi;
1744                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1745                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1746                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1747         }
1748 }
1749
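/*
 * apply a queued extent op (flag and/or tree block key update) to the
 * extent item it belongs to.
 */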
1750 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1751                                  struct btrfs_root *root,
1752                                  struct btrfs_delayed_ref_node *node,
1753                                  struct btrfs_delayed_extent_op *extent_op)
1754 {
1755         struct btrfs_key key;
1756         struct btrfs_path *path;
1757         struct btrfs_extent_item *ei;
1758         struct extent_buffer *leaf;
1759         u32 item_size;
1760         int ret;
1761         int err = 0;
1762
1763         path = btrfs_alloc_path();
1764         if (!path)
1765                 return -ENOMEM;
1766
1767         key.objectid = node->bytenr;
1768         key.type = BTRFS_EXTENT_ITEM_KEY;
1769         key.offset = node->num_bytes;
1770
1771         path->reada = 1;
1772         path->leave_spinning = 1;
1773         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1774                                 path, 0, 1);
1775         if (ret < 0) {
1776                 err = ret;
1777                 goto out;
1778         }
1779         if (ret > 0) {
1780                 err = -EIO;
1781                 goto out;
1782         }
1783
1784         leaf = path->nodes[0];
1785         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1786 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1787         if (item_size < sizeof(*ei)) {
1788                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1789                                              path, (u64)-1, 0);
1790                 if (ret < 0) {
1791                         err = ret;
1792                         goto out;
1793                 }
1794                 leaf = path->nodes[0];
1795                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1796         }
1797 #endif
1798         BUG_ON(item_size < sizeof(*ei));
1799         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1800         __run_delayed_extent_op(extent_op, leaf, ei);
1801
1802         btrfs_mark_buffer_dirty(leaf);
1803 out:
1804         btrfs_free_path(path);
1805         return err;
1806 }
1807
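/*
 * process a single delayed tree block ref, the metadata counterpart of
 * run_delayed_data_ref().
 */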
1808 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1809                                 struct btrfs_root *root,
1810                                 struct btrfs_delayed_ref_node *node,
1811                                 struct btrfs_delayed_extent_op *extent_op,
1812                                 int insert_reserved)
1813 {
1814         int ret = 0;
1815         struct btrfs_delayed_tree_ref *ref;
1816         struct btrfs_key ins;
1817         u64 parent = 0;
1818         u64 ref_root = 0;
1819
1820         ins.objectid = node->bytenr;
1821         ins.offset = node->num_bytes;
1822         ins.type = BTRFS_EXTENT_ITEM_KEY;
1823
1824         ref = btrfs_delayed_node_to_tree_ref(node);
1825         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1826                 parent = ref->parent;
1827         else
1828                 ref_root = ref->root;
1829
1830         BUG_ON(node->ref_mod != 1);
1831         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1832                 BUG_ON(!extent_op || !extent_op->update_flags ||
1833                        !extent_op->update_key);
1834                 ret = alloc_reserved_tree_block(trans, root,
1835                                                 parent, ref_root,
1836                                                 extent_op->flags_to_set,
1837                                                 &extent_op->key,
1838                                                 ref->level, &ins);
1839         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1840                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1841                                              node->num_bytes, parent, ref_root,
1842                                              ref->level, 0, 1, extent_op);
1843         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1844                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1845                                           node->num_bytes, parent, ref_root,
1846                                           ref->level, 0, 1, extent_op);
1847         } else {
1848                 BUG();
1849         }
1850         return ret;
1851 }
1852
1853
1854 /* helper function to actually process a single delayed ref entry */
1855 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1856                                struct btrfs_root *root,
1857                                struct btrfs_delayed_ref_node *node,
1858                                struct btrfs_delayed_extent_op *extent_op,
1859                                int insert_reserved)
1860 {
1861         int ret;
1862         if (btrfs_delayed_ref_is_head(node)) {
1863                 struct btrfs_delayed_ref_head *head;
1864                 /*
1865                  * we've hit the end of the chain and we were supposed
1866                  * to insert this extent into the tree.  But it got
1867                  * deleted before we ever needed to insert it, so all
1868                  * we have to do is clean up the accounting.
1869                  */
1870                 BUG_ON(extent_op);
1871                 head = btrfs_delayed_node_to_head(node);
1872                 if (insert_reserved) {
1873                         int mark_free = 0;
1874                         struct extent_buffer *must_clean = NULL;
1875
1876                         ret = pin_down_bytes(trans, root, NULL,
1877                                              node->bytenr, node->num_bytes,
1878                                              head->is_data, 1, &must_clean);
1879                         if (ret > 0)
1880                                 mark_free = 1;
1881
1882                         if (must_clean) {
1883                                 clean_tree_block(NULL, root, must_clean);
1884                                 btrfs_tree_unlock(must_clean);
1885                                 free_extent_buffer(must_clean);
1886                         }
1887                         if (head->is_data) {
1888                                 ret = btrfs_del_csums(trans, root,
1889                                                       node->bytenr,
1890                                                       node->num_bytes);
1891                                 BUG_ON(ret);
1892                         }
1893                         if (mark_free) {
1894                                 ret = btrfs_free_reserved_extent(root,
1895                                                         node->bytenr,
1896                                                         node->num_bytes);
1897                                 BUG_ON(ret);
1898                         }
1899                 }
1900                 mutex_unlock(&head->mutex);
1901                 return 0;
1902         }
1903
1904         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1905             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1906                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1907                                            insert_reserved);
1908         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1909                  node->type == BTRFS_SHARED_DATA_REF_KEY)
1910                 ret = run_delayed_data_ref(trans, root, node, extent_op,
1911                                            insert_reserved);
1912         else
1913                 BUG();
1914         return ret;
1915 }
1916
1917 static noinline struct btrfs_delayed_ref_node *
1918 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1919 {
1920         struct rb_node *node;
1921         struct btrfs_delayed_ref_node *ref;
1922         int action = BTRFS_ADD_DELAYED_REF;
1923 again:
1924         /*
1925          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
1926          * this prevents the ref count from going down to zero when
1927          * there are still pending delayed refs.
1928          */
1929         node = rb_prev(&head->node.rb_node);
1930         while (1) {
1931                 if (!node)
1932                         break;
1933                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1934                                 rb_node);
1935                 if (ref->bytenr != head->node.bytenr)
1936                         break;
1937                 if (ref->action == action)
1938                         return ref;
1939                 node = rb_prev(node);
1940         }
1941         if (action == BTRFS_ADD_DELAYED_REF) {
1942                 action = BTRFS_DROP_DELAYED_REF;
1943                 goto again;
1944         }
1945         return NULL;
1946 }
1947
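/*
 * run the delayed refs for the heads on the given cluster list.  The
 * delayed_refs spinlock must be held on entry; it is dropped while
 * each ref is processed and re-taken before returning.  Returns the
 * number of refs processed.
 */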
1948 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1949                                        struct btrfs_root *root,
1950                                        struct list_head *cluster)
1951 {
1952         struct btrfs_delayed_ref_root *delayed_refs;
1953         struct btrfs_delayed_ref_node *ref;
1954         struct btrfs_delayed_ref_head *locked_ref = NULL;
1955         struct btrfs_delayed_extent_op *extent_op;
1956         int ret;
1957         int count = 0;
1958         int must_insert_reserved = 0;
1959
1960         delayed_refs = &trans->transaction->delayed_refs;
1961         while (1) {
1962                 if (!locked_ref) {
1963                         /* pick a new head ref from the cluster list */
1964                         if (list_empty(cluster))
1965                                 break;
1966
1967                         locked_ref = list_entry(cluster->next,
1968                                      struct btrfs_delayed_ref_head, cluster);
1969
1970                         /* grab the lock that says we are going to process
1971                          * all the refs for this head */
1972                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
1973
1974                         /*
1975                          * we may have dropped the spin lock to get the head
1976                          * mutex lock, and that might have given someone else
1977                          * time to free the head.  If that's true, it has been
1978                          * removed from our list and we can move on.
1979                          */
1980                         if (ret == -EAGAIN) {
1981                                 locked_ref = NULL;
1982                                 count++;
1983                                 continue;
1984                         }
1985                 }
1986
1987                 /*
1988                  * record the must insert reserved flag before we
1989                  * drop the spin lock.
1990                  */
1991                 must_insert_reserved = locked_ref->must_insert_reserved;
1992                 locked_ref->must_insert_reserved = 0;
1993
1994                 extent_op = locked_ref->extent_op;
1995                 locked_ref->extent_op = NULL;
1996
1997                 /*
1998                  * locked_ref is the head node, so we have to go one
1999                  * node back for any delayed ref updates
2000                  */
2001                 ref = select_delayed_ref(locked_ref);
2002                 if (!ref) {
2003                         /* All delayed refs have been processed.  Go ahead
2004                          * and send the head node to run_one_delayed_ref,
2005                          * so that any accounting fixes can happen.
2006                          */
2007                         ref = &locked_ref->node;
2008
2009                         if (extent_op && must_insert_reserved) {
2010                                 kfree(extent_op);
2011                                 extent_op = NULL;
2012                         }
2013
2014                         if (extent_op) {
2015                                 spin_unlock(&delayed_refs->lock);
2016
2017                                 ret = run_delayed_extent_op(trans, root,
2018                                                             ref, extent_op);
2019                                 BUG_ON(ret);
2020                                 kfree(extent_op);
2021
2022                                 cond_resched();
2023                                 spin_lock(&delayed_refs->lock);
2024                                 continue;
2025                         }
2026
2027                         list_del_init(&locked_ref->cluster);
2028                         locked_ref = NULL;
2029                 }
2030
2031                 ref->in_tree = 0;
2032                 rb_erase(&ref->rb_node, &delayed_refs->root);
2033                 delayed_refs->num_entries--;
2034
2035                 spin_unlock(&delayed_refs->lock);
2036
2037                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2038                                           must_insert_reserved);
2039                 BUG_ON(ret);
2040
2041                 btrfs_put_delayed_ref(ref);
2042                 kfree(extent_op);
2043                 count++;
2044
2045                 cond_resched();
2046                 spin_lock(&delayed_refs->lock);
2047         }
2048         return count;
2049 }
2050
2051 /*
2052  * this starts processing the delayed reference count updates and
2053  * extent insertions we have queued up so far.  count can be
2054  * 0, which means to process everything in the tree at the start
2055  * of the run (but not newly added entries), or it can be some target
2056  * number you'd like to process.
2057  */
2058 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2059                            struct btrfs_root *root, unsigned long count)
2060 {
2061         struct rb_node *node;
2062         struct btrfs_delayed_ref_root *delayed_refs;
2063         struct btrfs_delayed_ref_node *ref;
2064         struct list_head cluster;
2065         int ret;
2066         int run_all = count == (unsigned long)-1;
2067         int run_most = 0;
2068
2069         if (root == root->fs_info->extent_root)
2070                 root = root->fs_info->tree_root;
2071
2072         delayed_refs = &trans->transaction->delayed_refs;
2073         INIT_LIST_HEAD(&cluster);
2074 again:
2075         spin_lock(&delayed_refs->lock);
2076         if (count == 0) {
2077                 count = delayed_refs->num_entries * 2;
2078                 run_most = 1;
2079         }
2080         while (1) {
2081                 if (!(run_all || run_most) &&
2082                     delayed_refs->num_heads_ready < 64)
2083                         break;
2084
2085                 /*
2086                  * go find something we can process in the rbtree.  We start at
2087                  * the beginning of the tree, and then build a cluster
2088                  * of refs to process starting at the first one we are able to
2089                  * lock
2090                  */
2091                 ret = btrfs_find_ref_cluster(trans, &cluster,
2092                                              delayed_refs->run_delayed_start);
2093                 if (ret)
2094                         break;
2095
2096                 ret = run_clustered_refs(trans, root, &cluster);
2097                 BUG_ON(ret < 0);
2098
2099                 count -= min_t(unsigned long, ret, count);
2100
2101                 if (count == 0)
2102                         break;
2103         }
2104
2105         if (run_all) {
2106                 node = rb_first(&delayed_refs->root);
2107                 if (!node)
2108                         goto out;
2109                 count = (unsigned long)-1;
2110
2111                 while (node) {
2112                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2113                                        rb_node);
2114                         if (btrfs_delayed_ref_is_head(ref)) {
2115                                 struct btrfs_delayed_ref_head *head;
2116
2117                                 head = btrfs_delayed_node_to_head(ref);
2118                                 atomic_inc(&ref->refs);
2119
2120                                 spin_unlock(&delayed_refs->lock);
2121                                 mutex_lock(&head->mutex);
2122                                 mutex_unlock(&head->mutex);
2123
2124                                 btrfs_put_delayed_ref(ref);
2125                                 cond_resched();
2126                                 goto again;
2127                         }
2128                         node = rb_next(node);
2129                 }
2130                 spin_unlock(&delayed_refs->lock);
2131                 schedule_timeout(1);
2132                 goto again;
2133         }
2134 out:
2135         spin_unlock(&delayed_refs->lock);
2136         return 0;
2137 }
2138
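/*
 * queue a delayed extent op that ORs the given flags into the on-disk
 * extent item for [bytenr, bytenr + num_bytes).
 */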
2139 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2140                                 struct btrfs_root *root,
2141                                 u64 bytenr, u64 num_bytes, u64 flags,
2142                                 int is_data)
2143 {
2144         struct btrfs_delayed_extent_op *extent_op;
2145         int ret;
2146
2147         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2148         if (!extent_op)
2149                 return -ENOMEM;
2150
2151         extent_op->flags_to_set = flags;
2152         extent_op->update_flags = 1;
2153         extent_op->update_key = 0;
2154         extent_op->is_data = is_data ? 1 : 0;
2155
2156         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2157         if (ret)
2158                 kfree(extent_op);
2159         return ret;
2160 }
2161
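/*
 * look through the delayed refs for references to this data extent
 * other than the (root, objectid, offset) we expect.  Returns 0 if the
 * only pending ref is our own, 1 if another ref exists, -ENOENT if
 * there are no pending refs and -EAGAIN if the head mutex was
 * contended and the caller should retry.
 */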
2162 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2163                                       struct btrfs_root *root,
2164                                       struct btrfs_path *path,
2165                                       u64 objectid, u64 offset, u64 bytenr)
2166 {
2167         struct btrfs_delayed_ref_head *head;
2168         struct btrfs_delayed_ref_node *ref;
2169         struct btrfs_delayed_data_ref *data_ref;
2170         struct btrfs_delayed_ref_root *delayed_refs;
2171         struct rb_node *node;
2172         int ret = 0;
2173
2174         ret = -ENOENT;
2175         delayed_refs = &trans->transaction->delayed_refs;
2176         spin_lock(&delayed_refs->lock);
2177         head = btrfs_find_delayed_ref_head(trans, bytenr);
2178         if (!head)
2179                 goto out;
2180
2181         if (!mutex_trylock(&head->mutex)) {
2182                 atomic_inc(&head->node.refs);
2183                 spin_unlock(&delayed_refs->lock);
2184
2185                 btrfs_release_path(root->fs_info->extent_root, path);
2186
2187                 mutex_lock(&head->mutex);
2188                 mutex_unlock(&head->mutex);
2189                 btrfs_put_delayed_ref(&head->node);
2190                 return -EAGAIN;
2191         }
2192
2193         node = rb_prev(&head->node.rb_node);
2194         if (!node)
2195                 goto out_unlock;
2196
2197         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2198
2199         if (ref->bytenr != bytenr)
2200                 goto out_unlock;
2201
2202         ret = 1;
2203         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2204                 goto out_unlock;
2205
2206         data_ref = btrfs_delayed_node_to_data_ref(ref);
2207
2208         node = rb_prev(node);
2209         if (node) {
2210                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2211                 if (ref->bytenr == bytenr)
2212                         goto out_unlock;
2213         }
2214
2215         if (data_ref->root != root->root_key.objectid ||
2216             data_ref->objectid != objectid || data_ref->offset != offset)
2217                 goto out_unlock;
2218
2219         ret = 0;
2220 out_unlock:
2221         mutex_unlock(&head->mutex);
2222 out:
2223         spin_unlock(&delayed_refs->lock);
2224         return ret;
2225 }
2226
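/*
 * the committed extent tree counterpart of check_delayed_ref().
 * Returns 0 only when the extent item carries a single inline data ref
 * matching (root, objectid, offset) and was created after the last
 * snapshot of the root; otherwise the extent may be shared.
 */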
2227 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2228                                         struct btrfs_root *root,
2229                                         struct btrfs_path *path,
2230                                         u64 objectid, u64 offset, u64 bytenr)
2231 {
2232         struct btrfs_root *extent_root = root->fs_info->extent_root;
2233         struct extent_buffer *leaf;
2234         struct btrfs_extent_data_ref *ref;
2235         struct btrfs_extent_inline_ref *iref;
2236         struct btrfs_extent_item *ei;
2237         struct btrfs_key key;
2238         u32 item_size;
2239         int ret;
2240
2241         key.objectid = bytenr;
2242         key.offset = (u64)-1;
2243         key.type = BTRFS_EXTENT_ITEM_KEY;
2244
2245         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2246         if (ret < 0)
2247                 goto out;
2248         BUG_ON(ret == 0);
2249
2250         ret = -ENOENT;
2251         if (path->slots[0] == 0)
2252                 goto out;
2253
2254         path->slots[0]--;
2255         leaf = path->nodes[0];
2256         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2257
2258         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2259                 goto out;
2260
2261         ret = 1;
2262         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2263 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2264         if (item_size < sizeof(*ei)) {
2265                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2266                 goto out;
2267         }
2268 #endif
2269         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2270
2271         if (item_size != sizeof(*ei) +
2272             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2273                 goto out;
2274
2275         if (btrfs_extent_generation(leaf, ei) <=
2276             btrfs_root_last_snapshot(&root->root_item))
2277                 goto out;
2278
2279         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2280         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2281             BTRFS_EXTENT_DATA_REF_KEY)
2282                 goto out;
2283
2284         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2285         if (btrfs_extent_refs(leaf, ei) !=
2286             btrfs_extent_data_ref_count(leaf, ref) ||
2287             btrfs_extent_data_ref_root(leaf, ref) !=
2288             root->root_key.objectid ||
2289             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2290             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2291                 goto out;
2292
2293         ret = 0;
2294 out:
2295         return ret;
2296 }
2297
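/*
 * check whether a data extent is referenced by anything other than the
 * given (objectid, offset) in this root.  Zero means it is not cross
 * referenced; any non-zero return means the caller must treat the
 * extent as shared.
 */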
2298 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2299                           struct btrfs_root *root,
2300                           u64 objectid, u64 offset, u64 bytenr)
2301 {
2302         struct btrfs_path *path;
2303         int ret;
2304         int ret2;
2305
2306         path = btrfs_alloc_path();
2307         if (!path)
2308                 return -ENOENT;
2309
2310         do {
2311                 ret = check_committed_ref(trans, root, path, objectid,
2312                                           offset, bytenr);
2313                 if (ret && ret != -ENOENT)
2314                         goto out;
2315
2316                 ret2 = check_delayed_ref(trans, root, path, objectid,
2317                                          offset, bytenr);
2318         } while (ret2 == -EAGAIN);
2319
2320         if (ret2 && ret2 != -ENOENT) {
2321                 ret = ret2;
2322                 goto out;
2323         }
2324
2325         if (ret != -ENOENT || ret2 != -ENOENT)
2326                 ret = 0;
2327 out:
2328         btrfs_free_path(path);
2329         return ret;
2330 }
2331
2332 #if 0
2333 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2334                     struct extent_buffer *buf, u32 nr_extents)
2335 {
2336         struct btrfs_key key;
2337         struct btrfs_file_extent_item *fi;
2338         u64 root_gen;
2339         u32 nritems;
2340         int i;
2341         int level;
2342         int ret = 0;
2343         int shared = 0;
2344
2345         if (!root->ref_cows)
2346                 return 0;
2347
2348         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2349                 shared = 0;
2350                 root_gen = root->root_key.offset;
2351         } else {
2352                 shared = 1;
2353                 root_gen = trans->transid - 1;
2354         }
2355
2356         level = btrfs_header_level(buf);
2357         nritems = btrfs_header_nritems(buf);
2358
2359         if (level == 0) {
2360                 struct btrfs_leaf_ref *ref;
2361                 struct btrfs_extent_info *info;
2362
2363                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2364                 if (!ref) {
2365                         ret = -ENOMEM;
2366                         goto out;
2367                 }
2368
2369                 ref->root_gen = root_gen;
2370                 ref->bytenr = buf->start;
2371                 ref->owner = btrfs_header_owner(buf);
2372                 ref->generation = btrfs_header_generation(buf);
2373                 ref->nritems = nr_extents;
2374                 info = ref->extents;
2375
2376                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2377                         u64 disk_bytenr;
2378                         btrfs_item_key_to_cpu(buf, &key, i);
2379                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2380                                 continue;
2381                         fi = btrfs_item_ptr(buf, i,
2382                                             struct btrfs_file_extent_item);
2383                         if (btrfs_file_extent_type(buf, fi) ==
2384                             BTRFS_FILE_EXTENT_INLINE)
2385                                 continue;
2386                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2387                         if (disk_bytenr == 0)
2388                                 continue;
2389
2390                         info->bytenr = disk_bytenr;
2391                         info->num_bytes =
2392                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2393                         info->objectid = key.objectid;
2394                         info->offset = key.offset;
2395                         info++;
2396                 }
2397
2398                 ret = btrfs_add_leaf_ref(root, ref, shared);
2399                 if (ret == -EEXIST && shared) {
2400                         struct btrfs_leaf_ref *old;
2401                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2402                         BUG_ON(!old);
2403                         btrfs_remove_leaf_ref(root, old);
2404                         btrfs_free_leaf_ref(root, old);
2405                         ret = btrfs_add_leaf_ref(root, ref, shared);
2406                 }
2407                 WARN_ON(ret);
2408                 btrfs_free_leaf_ref(root, ref);
2409         }
2410 out:
2411         return ret;
2412 }
2413
2414 /* when a block goes through cow, we update the reference counts of
2415  * everything that block points to.  The internal pointers of the block
2416  * can be in just about any order, and it is likely to have clusters of
2417  * things that are close together and clusters of things that are not.
2418  *
2419  * To help reduce the seeks that come with updating all of these reference
2420  * counts, sort them by byte number before actual updates are done.
2421  *
2422  * struct refsort is used to match byte number to slot in the btree block.
2423  * we sort based on the byte number and then use the slot to actually
2424  * find the item.
2425  *
2426  * struct refsort is smaller than struct btrfs_item and smaller than
2427  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2428  * for a btree block, there's no way for a kmalloc of refsorts for a
2429  * single node to be bigger than a page.
2430  */
2431 struct refsort {
2432         u64 bytenr;
2433         u32 slot;
2434 };
2435
2436 /*
2437  * for passing into sort()
2438  */
2439 static int refsort_cmp(const void *a_void, const void *b_void)
2440 {
2441         const struct refsort *a = a_void;
2442         const struct refsort *b = b_void;
2443
2444         if (a->bytenr < b->bytenr)
2445                 return -1;
2446         if (a->bytenr > b->bytenr)
2447                 return 1;
2448         return 0;
2449 }
2450 #endif
2451
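/*
 * add or drop one reference for everything a tree block points to:
 * file extents in a leaf, lower tree blocks in a node.  full_backref
 * selects shared back refs (parent = buf->start) instead of keyed
 * back refs.
 */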
2452 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2453                            struct btrfs_root *root,
2454                            struct extent_buffer *buf,
2455                            int full_backref, int inc)
2456 {
2457         u64 bytenr;
2458         u64 num_bytes;
2459         u64 parent;
2460         u64 ref_root;
2461         u32 nritems;
2462         struct btrfs_key key;
2463         struct btrfs_file_extent_item *fi;
2464         int i;
2465         int level;
2466         int ret = 0;
2467         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2468                             u64, u64, u64, u64, u64, u64);
2469
2470         ref_root = btrfs_header_owner(buf);
2471         nritems = btrfs_header_nritems(buf);
2472         level = btrfs_header_level(buf);
2473
2474         if (!root->ref_cows && level == 0)
2475                 return 0;
2476
2477         if (inc)
2478                 process_func = btrfs_inc_extent_ref;
2479         else
2480                 process_func = btrfs_free_extent;
2481
2482         if (full_backref)
2483                 parent = buf->start;
2484         else
2485                 parent = 0;
2486
2487         for (i = 0; i < nritems; i++) {
2488                 if (level == 0) {
2489                         btrfs_item_key_to_cpu(buf, &key, i);
2490                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2491                                 continue;
2492                         fi = btrfs_item_ptr(buf, i,
2493                                             struct btrfs_file_extent_item);
2494                         if (btrfs_file_extent_type(buf, fi) ==
2495                             BTRFS_FILE_EXTENT_INLINE)
2496                                 continue;
2497                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2498                         if (bytenr == 0)
2499                                 continue;
2500
2501                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2502                         key.offset -= btrfs_file_extent_offset(buf, fi);
2503                         ret = process_func(trans, root, bytenr, num_bytes,
2504                                            parent, ref_root, key.objectid,
2505                                            key.offset);
2506                         if (ret)
2507                                 goto fail;
2508                 } else {
2509                         bytenr = btrfs_node_blockptr(buf, i);
2510                         num_bytes = btrfs_level_size(root, level - 1);
2511                         ret = process_func(trans, root, bytenr, num_bytes,
2512                                            parent, ref_root, level - 1, 0);
2513                         if (ret)
2514                                 goto fail;
2515                 }
2516         }
2517         return 0;
2518 fail:
2519         BUG();
2520         return ret;
2521 }
2522
2523 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2524                   struct extent_buffer *buf, int full_backref)
2525 {
2526         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2527 }
2528
2529 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2530                   struct extent_buffer *buf, int full_backref)
2531 {
2532         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2533 }
2534
2535 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2536                                  struct btrfs_root *root,
2537                                  struct btrfs_path *path,
2538                                  struct btrfs_block_group_cache *cache)
2539 {
2540         int ret;
2541         struct btrfs_root *extent_root = root->fs_info->extent_root;
2542         unsigned long bi;
2543         struct extent_buffer *leaf;
2544
2545         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2546         if (ret < 0)
2547                 goto fail;
2548         BUG_ON(ret);
2549
2550         leaf = path->nodes[0];
2551         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2552         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2553         btrfs_mark_buffer_dirty(leaf);
2554         btrfs_release_path(extent_root, path);
2555 fail:
2556         if (ret)
2557                 return ret;
2558         return 0;
2559
2560 }
2561
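/*
 * return the block group that follows 'cache' in the block group
 * rbtree.  The reference on 'cache' is dropped and a reference is
 * taken on the group that is returned (NULL at the end of the tree).
 */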
2562 static struct btrfs_block_group_cache *
2563 next_block_group(struct btrfs_root *root,
2564                  struct btrfs_block_group_cache *cache)
2565 {
2566         struct rb_node *node;
2567         spin_lock(&root->fs_info->block_group_cache_lock);
2568         node = rb_next(&cache->cache_node);
2569         btrfs_put_block_group(cache);
2570         if (node) {
2571                 cache = rb_entry(node, struct btrfs_block_group_cache,
2572                                  cache_node);
2573                 atomic_inc(&cache->count);
2574         } else
2575                 cache = NULL;
2576         spin_unlock(&root->fs_info->block_group_cache_lock);
2577         return cache;
2578 }
2579
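/*
 * write the block group item for every dirty block group cache,
 * running the queued delayed refs first so the items are up to date.
 */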
2580 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2581                                    struct btrfs_root *root)
2582 {
2583         struct btrfs_block_group_cache *cache;
2584         int err = 0;
2585         struct btrfs_path *path;
2586         u64 last = 0;
2587
2588         path = btrfs_alloc_path();
2589         if (!path)
2590                 return -ENOMEM;
2591
2592         while (1) {
2593                 if (last == 0) {
2594                         err = btrfs_run_delayed_refs(trans, root,
2595                                                      (unsigned long)-1);
2596                         BUG_ON(err);
2597                 }
2598
2599                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2600                 while (cache) {
2601                         if (cache->dirty)
2602                                 break;
2603                         cache = next_block_group(root, cache);
2604                 }
2605                 if (!cache) {
2606                         if (last == 0)
2607                                 break;
2608                         last = 0;
2609                         continue;
2610                 }
2611
2612                 cache->dirty = 0;
2613                 last = cache->key.objectid + cache->key.offset;
2614
2615                 err = write_one_cache_group(trans, root, path, cache);
2616                 BUG_ON(err);
2617                 btrfs_put_block_group(cache);
2618         }
2619
2620         btrfs_free_path(path);
2621         return 0;
2622 }
2623
2624 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2625 {
2626         struct btrfs_block_group_cache *block_group;
2627         int readonly = 0;
2628
2629         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2630         if (!block_group || block_group->ro)
2631                 readonly = 1;
2632         if (block_group)
2633                 btrfs_put_block_group(block_group);
2634         return readonly;
2635 }
2636
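/*
 * update the space_info that matches 'flags' with the given totals,
 * allocating and registering a new one if none exists yet.
 */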
2637 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2638                              u64 total_bytes, u64 bytes_used,
2639                              struct btrfs_space_info **space_info)
2640 {
2641         struct btrfs_space_info *found;
2642
2643         found = __find_space_info(info, flags);
2644         if (found) {
2645                 spin_lock(&found->lock);
2646                 found->total_bytes += total_bytes;
2647                 found->bytes_used += bytes_used;
2648                 found->full = 0;
2649                 spin_unlock(&found->lock);
2650                 *space_info = found;
2651                 return 0;
2652         }
2653         found = kzalloc(sizeof(*found), GFP_NOFS);
2654         if (!found)
2655                 return -ENOMEM;
2656
2657         INIT_LIST_HEAD(&found->block_groups);
2658         init_rwsem(&found->groups_sem);
2659         spin_lock_init(&found->lock);
2660         found->flags = flags;
2661         found->total_bytes = total_bytes;
2662         found->bytes_used = bytes_used;
2663         found->bytes_pinned = 0;
2664         found->bytes_reserved = 0;
2665         found->bytes_readonly = 0;
2666         found->bytes_delalloc = 0;
2667         found->full = 0;
2668         found->force_alloc = 0;
2669         *space_info = found;
2670         list_add_rcu(&found->list, &info->space_info);
2671         atomic_set(&found->caching_threads, 0);
2672         return 0;
2673 }
2674
2675 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2676 {
2677         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2678                                    BTRFS_BLOCK_GROUP_RAID1 |
2679                                    BTRFS_BLOCK_GROUP_RAID10 |
2680                                    BTRFS_BLOCK_GROUP_DUP);
2681         if (extra_flags) {
2682                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2683                         fs_info->avail_data_alloc_bits |= extra_flags;
2684                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2685                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2686                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2687                         fs_info->avail_system_alloc_bits |= extra_flags;
2688         }
2689 }
2690
2691 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2692 {
2693         spin_lock(&cache->space_info->lock);
2694         spin_lock(&cache->lock);
2695         if (!cache->ro) {
2696                 cache->space_info->bytes_readonly += cache->key.offset -
2697                                         btrfs_block_group_used(&cache->item);
2698                 cache->ro = 1;
2699         }
2700         spin_unlock(&cache->lock);
2701         spin_unlock(&cache->space_info->lock);
2702 }
2703
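/*
 * reduce an allocation profile to something the current number of
 * read/write devices can support, and keep only one redundancy
 * profile when several bits are set.
 */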
2704 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2705 {
2706         u64 num_devices = root->fs_info->fs_devices->rw_devices;
2707
2708         if (num_devices == 1)
2709                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2710         if (num_devices < 4)
2711                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2712
2713         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2714             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2715                       BTRFS_BLOCK_GROUP_RAID10))) {
2716                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2717         }
2718
2719         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2720             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2721                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2722         }
2723
2724         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2725             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2726              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2727              (flags & BTRFS_BLOCK_GROUP_DUP)))
2728                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2729         return flags;
2730 }
2731
2732 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2733 {
2734         struct btrfs_fs_info *info = root->fs_info;
2735         u64 alloc_profile;
2736
2737         if (data) {
2738                 alloc_profile = info->avail_data_alloc_bits &
2739                         info->data_alloc_profile;
2740                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2741         } else if (root == root->fs_info->chunk_root) {
2742                 alloc_profile = info->avail_system_alloc_bits &
2743                         info->system_alloc_profile;
2744                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2745         } else {
2746                 alloc_profile = info->avail_metadata_alloc_bits &
2747                         info->metadata_alloc_profile;
2748                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2749         }
2750
2751         return btrfs_reduce_alloc_profile(root, data);
2752 }
2753
2754 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2755 {
2756         u64 alloc_target;
2757
2758         alloc_target = btrfs_get_alloc_profile(root, 1);
2759         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2760                                                        alloc_target);
2761 }
2762
2763 /*
2764  * for now this just makes sure we have at least 5% of our metadata space free
2765  * for use; a new chunk is allocated once 80% of the existing space is consumed.
2766  */
2767 int btrfs_check_metadata_free_space(struct btrfs_root *root)
2768 {
2769         struct btrfs_fs_info *info = root->fs_info;
2770         struct btrfs_space_info *meta_sinfo;
2771         u64 alloc_target, thresh;
2772         int committed = 0, ret;
2773
2774         /* get the space info for where the metadata will live */
2775         alloc_target = btrfs_get_alloc_profile(root, 0);
2776         meta_sinfo = __find_space_info(info, alloc_target);
2777
2778 again:
2779         spin_lock(&meta_sinfo->lock);
2780         if (!meta_sinfo->full)
2781                 thresh = meta_sinfo->total_bytes * 80;
2782         else
2783                 thresh = meta_sinfo->total_bytes * 95;
2784
2785         do_div(thresh, 100);
2786
2787         if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2788             meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
2789                 struct btrfs_trans_handle *trans;
2790                 if (!meta_sinfo->full) {
2791                         meta_sinfo->force_alloc = 1;
2792                         spin_unlock(&meta_sinfo->lock);
2793
2794                         trans = btrfs_start_transaction(root, 1);
2795                         if (!trans)
2796                                 return -ENOMEM;
2797
2798                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2799                                              2 * 1024 * 1024, alloc_target, 0);
2800                         btrfs_end_transaction(trans, root);
2801                         goto again;
2802                 }
2803                 spin_unlock(&meta_sinfo->lock);
2804
2805                 if (!committed) {
2806                         committed = 1;
2807                         trans = btrfs_join_transaction(root, 1);
2808                         if (!trans)
2809                                 return -ENOMEM;
2810                         ret = btrfs_commit_transaction(trans, root);
2811                         if (ret)
2812                                 return ret;
2813                         goto again;
2814                 }
2815                 return -ENOSPC;
2816         }
2817         spin_unlock(&meta_sinfo->lock);
2818
2819         return 0;
2820 }
2821
2822 /*
2823  * This will check the space that the inode allocates from to make sure we have
2824  * enough space for the requested number of bytes.
2825  */
2826 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2827                                 u64 bytes)
2828 {
2829         struct btrfs_space_info *data_sinfo;
2830         int ret = 0, committed = 0;
2831
2832         /* make sure bytes are sectorsize aligned */
2833         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
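        /* e.g. with a 4k sectorsize, a 5000 byte request rounds up to 8192 */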
2834
2835         data_sinfo = BTRFS_I(inode)->space_info;
2836 again:
2837         /* make sure we have enough space to handle the data first */
2838         spin_lock(&data_sinfo->lock);
2839         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2840             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2841             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2842             data_sinfo->bytes_may_use < bytes) {
2843                 struct btrfs_trans_handle *trans;
2844
2845                 /*
2846                  * if we don't have enough free bytes in this space then we need
2847                  * to alloc a new chunk.
2848                  */
2849                 if (!data_sinfo->full) {
2850                         u64 alloc_target;
2851
2852                         data_sinfo->force_alloc = 1;
2853                         spin_unlock(&data_sinfo->lock);
2854
2855                         alloc_target = btrfs_get_alloc_profile(root, 1);
2856                         trans = btrfs_start_transaction(root, 1);
2857                         if (!trans)
2858                                 return -ENOMEM;
2859
2860                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2861                                              bytes + 2 * 1024 * 1024,
2862                                              alloc_target, 0);
2863                         btrfs_end_transaction(trans, root);
2864                         if (ret)
2865                                 return ret;
2866                         goto again;
2867                 }
2868                 spin_unlock(&data_sinfo->lock);
2869
2870                 /* commit the current transaction and try again */
2871                 if (!committed) {
2872                         committed = 1;
2873                         trans = btrfs_join_transaction(root, 1);
2874                         if (!trans)
2875                                 return -ENOMEM;
2876                         ret = btrfs_commit_transaction(trans, root);
2877                         if (ret)
2878                                 return ret;
2879                         goto again;
2880                 }
2881
2882                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2883                        ", %llu bytes_used, %llu bytes_reserved, "
2884                        "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
2885                        "%llu total\n", (unsigned long long)bytes,
2886                        (unsigned long long)data_sinfo->bytes_delalloc,
2887                        (unsigned long long)data_sinfo->bytes_used,
2888                        (unsigned long long)data_sinfo->bytes_reserved,
2889                        (unsigned long long)data_sinfo->bytes_pinned,
2890                        (unsigned long long)data_sinfo->bytes_readonly,
2891                        (unsigned long long)data_sinfo->bytes_may_use,
2892                        (unsigned long long)data_sinfo->total_bytes);
2893                 return -ENOSPC;
2894         }
2895         data_sinfo->bytes_may_use += bytes;
2896         BTRFS_I(inode)->reserved_bytes += bytes;
2897         spin_unlock(&data_sinfo->lock);
2898
2899         return btrfs_check_metadata_free_space(root);
2900 }
2901
2902 /*
2903  * if there was an error for whatever reason after calling
2904  * btrfs_check_data_free_space, call this so we can cleanup the counters.
2905  */
2906 void btrfs_free_reserved_data_space(struct btrfs_root *root,
2907                                     struct inode *inode, u64 bytes)
2908 {
2909         struct btrfs_space_info *data_sinfo;
2910
2911         /* make sure bytes are sectorsize aligned */
2912         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2913
2914         data_sinfo = BTRFS_I(inode)->space_info;
2915         spin_lock(&data_sinfo->lock);
2916         data_sinfo->bytes_may_use -= bytes;
2917         BTRFS_I(inode)->reserved_bytes -= bytes;
2918         spin_unlock(&data_sinfo->lock);
2919 }
2920
2921 /* called when we are adding a delalloc extent to the inode's io_tree */
2922 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2923                                   u64 bytes)
2924 {
2925         struct btrfs_space_info *data_sinfo;
2926
2927         /* get the space info for where this inode will be storing its data */
2928         data_sinfo = BTRFS_I(inode)->space_info;
2929
2930         /* make sure we have enough space to handle the data first */
2931         spin_lock(&data_sinfo->lock);
2932         data_sinfo->bytes_delalloc += bytes;
2933
2934         /*
2935          * we are adding a delalloc extent without calling
2936          * btrfs_check_data_free_space first.  This happens on a weird
2937          * writepage condition, but shouldn't hurt our accounting
2938          */
2939         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2940                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2941                 BTRFS_I(inode)->reserved_bytes = 0;
2942         } else {
2943                 data_sinfo->bytes_may_use -= bytes;
2944                 BTRFS_I(inode)->reserved_bytes -= bytes;
2945         }
2946
2947         spin_unlock(&data_sinfo->lock);
2948 }
2949
2950 /* called when we are clearing a delalloc extent from the inode's io_tree */
2951 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2952                               u64 bytes)
2953 {
2954         struct btrfs_space_info *info;
2955
2956         info = BTRFS_I(inode)->space_info;
2957
2958         spin_lock(&info->lock);
2959         info->bytes_delalloc -= bytes;
2960         spin_unlock(&info->lock);
2961 }
2962
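/*
 * set force_alloc on every metadata space_info so the next call to
 * do_chunk_alloc for metadata is not skipped by the threshold check
 */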
2963 static void force_metadata_allocation(struct btrfs_fs_info *info)
2964 {
2965         struct list_head *head = &info->space_info;
2966         struct btrfs_space_info *found;
2967
2968         rcu_read_lock();
2969         list_for_each_entry_rcu(found, head, list) {
2970                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2971                         found->force_alloc = 1;
2972         }
2973         rcu_read_unlock();
2974 }
2975
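/*
 * allocate a new chunk of the given type.  Unless force is set (or force_alloc
 * was set on the space_info), nothing happens while the space is full or while
 * used + pinned + reserved plus this request stays under roughly 60% of the
 * writable space.  Every fs_info->metadata_ratio data chunk allocations also
 * force a metadata chunk so metadata space keeps pace with data
 */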
2976 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
2977                           struct btrfs_root *extent_root, u64 alloc_bytes,
2978                           u64 flags, int force)
2979 {
2980         struct btrfs_space_info *space_info;
2981         struct btrfs_fs_info *fs_info = extent_root->fs_info;
2982         u64 thresh;
2983         int ret = 0;
2984
2985         mutex_lock(&fs_info->chunk_mutex);
2986
2987         flags = btrfs_reduce_alloc_profile(extent_root, flags);
2988
2989         space_info = __find_space_info(extent_root->fs_info, flags);
2990         if (!space_info) {
2991                 ret = update_space_info(extent_root->fs_info, flags,
2992                                         0, 0, &space_info);
2993                 BUG_ON(ret);
2994         }
2995         BUG_ON(!space_info);
2996
2997         spin_lock(&space_info->lock);
2998         if (space_info->force_alloc) {
2999                 force = 1;
3000                 space_info->force_alloc = 0;
3001         }
3002         if (space_info->full) {
3003                 spin_unlock(&space_info->lock);
3004                 goto out;
3005         }
3006
3007         thresh = space_info->total_bytes - space_info->bytes_readonly;
3008         thresh = div_factor(thresh, 6);
3009         if (!force &&
3010            (space_info->bytes_used + space_info->bytes_pinned +
3011             space_info->bytes_reserved + alloc_bytes) < thresh) {
3012                 spin_unlock(&space_info->lock);
3013                 goto out;
3014         }
3015         spin_unlock(&space_info->lock);
3016
3017         /*
3018          * if we're doing a data chunk, go ahead and make sure that
3019          * we keep a reasonable number of metadata chunks allocated in the
3020          * FS as well.
3021          */
3022         if (flags & BTRFS_BLOCK_GROUP_DATA) {
3023                 fs_info->data_chunk_allocations++;
3024                 if (!(fs_info->data_chunk_allocations %
3025                       fs_info->metadata_ratio))
3026                         force_metadata_allocation(fs_info);
3027         }
3028
3029         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3030         if (ret)
3031                 space_info->full = 1;
3032 out:
3033         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3034         return ret;
3035 }
3036
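/*
 * update the used-bytes accounting for an allocation (alloc=1) or a free
 * (alloc=0): the super block and root item totals, and every block group the
 * range touches.  On allocation the bytes move from reserved to used; on a
 * free with mark_free set the range is discarded and returned to the block
 * group's free space cache
 */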
3037 static int update_block_group(struct btrfs_trans_handle *trans,
3038                               struct btrfs_root *root,
3039                               u64 bytenr, u64 num_bytes, int alloc,
3040                               int mark_free)
3041 {
3042         struct btrfs_block_group_cache *cache;
3043         struct btrfs_fs_info *info = root->fs_info;
3044         u64 total = num_bytes;
3045         u64 old_val;
3046         u64 byte_in_group;
3047
3048         /* block accounting for super block */
3049         spin_lock(&info->delalloc_lock);
3050         old_val = btrfs_super_bytes_used(&info->super_copy);
3051         if (alloc)
3052                 old_val += num_bytes;
3053         else
3054                 old_val -= num_bytes;
3055         btrfs_set_super_bytes_used(&info->super_copy, old_val);
3056
3057         /* block accounting for root item */
3058         old_val = btrfs_root_used(&root->root_item);
3059         if (alloc)
3060                 old_val += num_bytes;
3061         else
3062                 old_val -= num_bytes;
3063         btrfs_set_root_used(&root->root_item, old_val);
3064         spin_unlock(&info->delalloc_lock);
3065
3066         while (total) {
3067                 cache = btrfs_lookup_block_group(info, bytenr);
3068                 if (!cache)
3069                         return -1;
3070                 byte_in_group = bytenr - cache->key.objectid;
3071                 WARN_ON(byte_in_group > cache->key.offset);
3072
3073                 spin_lock(&cache->space_info->lock);
3074                 spin_lock(&cache->lock);
3075                 cache->dirty = 1;
3076                 old_val = btrfs_block_group_used(&cache->item);
3077                 num_bytes = min(total, cache->key.offset - byte_in_group);
3078                 if (alloc) {
3079                         old_val += num_bytes;
3080                         btrfs_set_block_group_used(&cache->item, old_val);
3081                         cache->reserved -= num_bytes;
3082                         cache->space_info->bytes_used += num_bytes;
3083                         cache->space_info->bytes_reserved -= num_bytes;
3084                         if (cache->ro)
3085                                 cache->space_info->bytes_readonly -= num_bytes;
3086                         spin_unlock(&cache->lock);
3087                         spin_unlock(&cache->space_info->lock);
3088                 } else {
3089                         old_val -= num_bytes;
3090                         cache->space_info->bytes_used -= num_bytes;
3091                         if (cache->ro)
3092                                 cache->space_info->bytes_readonly += num_bytes;
3093                         btrfs_set_block_group_used(&cache->item, old_val);
3094                         spin_unlock(&cache->lock);
3095                         spin_unlock(&cache->space_info->lock);
3096                         if (mark_free) {
3097                                 int ret;
3098
3099                                 ret = btrfs_discard_extent(root, bytenr,
3100                                                            num_bytes);
3101                                 WARN_ON(ret);
3102
3103                                 ret = btrfs_add_free_space(cache, bytenr,
3104                                                            num_bytes);
3105                                 WARN_ON(ret);
3106                         }
3107                 }
3108                 btrfs_put_block_group(cache);
3109                 total -= num_bytes;
3110                 bytenr += num_bytes;
3111         }
3112         return 0;
3113 }
3114
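/*
 * return the logical start of the first block group at or after search_start,
 * or 0 if there is none
 */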
3115 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3116 {
3117         struct btrfs_block_group_cache *cache;
3118         u64 bytenr;
3119
3120         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3121         if (!cache)
3122                 return 0;
3123
3124         bytenr = cache->key.objectid;
3125         btrfs_put_block_group(cache);
3126
3127         return bytenr;
3128 }
3129
3130 /*
3131  * this function must be called within a transaction
3132  */
3133 int btrfs_pin_extent(struct btrfs_root *root,
3134                      u64 bytenr, u64 num_bytes, int reserved)
3135 {
3136         struct btrfs_fs_info *fs_info = root->fs_info;
3137         struct btrfs_block_group_cache *cache;
3138
3139         cache = btrfs_lookup_block_group(fs_info, bytenr);
3140         BUG_ON(!cache);
3141
3142         spin_lock(&cache->space_info->lock);
3143         spin_lock(&cache->lock);
3144         cache->pinned += num_bytes;
3145         cache->space_info->bytes_pinned += num_bytes;
3146         if (reserved) {
3147                 cache->reserved -= num_bytes;
3148                 cache->space_info->bytes_reserved -= num_bytes;
3149         }
3150         spin_unlock(&cache->lock);
3151         spin_unlock(&cache->space_info->lock);
3152
3153         btrfs_put_block_group(cache);
3154
3155         set_extent_dirty(fs_info->pinned_extents,
3156                          bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3157         return 0;
3158 }
3159
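/*
 * adjust the reserved byte counters on a block group and its space_info when
 * an extent is reserved (reserve=1) or the reservation is released
 */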
3160 static int update_reserved_extents(struct btrfs_block_group_cache *cache,
3161                                    u64 num_bytes, int reserve)
3162 {
3163         spin_lock(&cache->space_info->lock);
3164         spin_lock(&cache->lock);
3165         if (reserve) {
3166                 cache->reserved += num_bytes;
3167                 cache->space_info->bytes_reserved += num_bytes;
3168         } else {
3169                 cache->reserved -= num_bytes;
3170                 cache->space_info->bytes_reserved -= num_bytes;
3171         }
3172         spin_unlock(&cache->lock);
3173         spin_unlock(&cache->space_info->lock);
3174         return 0;
3175 }
3176
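/*
 * called as a transaction commit starts: remember how far each still-caching
 * block group has progressed (last_byte_to_unpin) and flip pinned_extents to
 * the other freed_extents tree so that new pins land in the tree that belongs
 * to the next transaction
 */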
3177 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3178                                 struct btrfs_root *root)
3179 {
3180         struct btrfs_fs_info *fs_info = root->fs_info;
3181         struct btrfs_caching_control *next;
3182         struct btrfs_caching_control *caching_ctl;
3183         struct btrfs_block_group_cache *cache;
3184
3185         down_write(&fs_info->extent_commit_sem);
3186
3187         list_for_each_entry_safe(caching_ctl, next,
3188                                  &fs_info->caching_block_groups, list) {
3189                 cache = caching_ctl->block_group;
3190                 if (block_group_cache_done(cache)) {
3191                         cache->last_byte_to_unpin = (u64)-1;
3192                         list_del_init(&caching_ctl->list);
3193                         put_caching_control(caching_ctl);
3194                 } else {
3195                         cache->last_byte_to_unpin = caching_ctl->progress;
3196                 }
3197         }
3198
3199         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3200                 fs_info->pinned_extents = &fs_info->freed_extents[1];
3201         else
3202                 fs_info->pinned_extents = &fs_info->freed_extents[0];
3203
3204         up_write(&fs_info->extent_commit_sem);
3205         return 0;
3206 }
3207
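/*
 * walk [start, end] and drop the pinned counters for every block group the
 * range covers; bytes below the group's last_byte_to_unpin also go back into
 * the free space cache
 */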
3208 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
3209 {
3210         struct btrfs_fs_info *fs_info = root->fs_info;
3211         struct btrfs_block_group_cache *cache = NULL;
3212         u64 len;
3213
3214         while (start <= end) {
3215                 if (!cache ||
3216                     start >= cache->key.objectid + cache->key.offset) {
3217                         if (cache)
3218                                 btrfs_put_block_group(cache);
3219                         cache = btrfs_lookup_block_group(fs_info, start);
3220                         BUG_ON(!cache);
3221                 }
3222
3223                 len = cache->key.objectid + cache->key.offset - start;
3224                 len = min(len, end + 1 - start);
3225
3226                 if (start < cache->last_byte_to_unpin) {
3227                         len = min(len, cache->last_byte_to_unpin - start);
3228                         btrfs_add_free_space(cache, start, len);
3229                 }
3230
3231                 spin_lock(&cache->space_info->lock);
3232                 spin_lock(&cache->lock);
3233                 cache->pinned -= len;
3234                 cache->space_info->bytes_pinned -= len;
3235                 spin_unlock(&cache->lock);
3236                 spin_unlock(&cache->space_info->lock);
3237
3238                 start += len;
3239         }
3240
3241         if (cache)
3242                 btrfs_put_block_group(cache);
3243         return 0;
3244 }
3245
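/*
 * at the end of a commit, discard and unpin every range recorded in the
 * freed_extents tree that is no longer the active pinned_extents tree
 */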
3246 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3247                                struct btrfs_root *root)
3248 {
3249         struct btrfs_fs_info *fs_info = root->fs_info;
3250         struct extent_io_tree *unpin;
3251         u64 start;
3252         u64 end;
3253         int ret;
3254
3255         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3256                 unpin = &fs_info->freed_extents[1];
3257         else
3258                 unpin = &fs_info->freed_extents[0];
3259
3260         while (1) {
3261                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3262                                             EXTENT_DIRTY);
3263                 if (ret)
3264                         break;
3265
3266                 ret = btrfs_discard_extent(root, start, end + 1 - start);
3267
3268                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3269                 unpin_extent_range(root, start, end);
3270                 cond_resched();
3271         }
3272
3273         return ret;
3274 }
3275
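/*
 * try to reclaim a tree block that was allocated in this transaction and never
 * written: if we can lock it and it isn't from the log tree, hand it back via
 * must_clean and return 1 so the bytes go straight back to the free space
 * cache.  Otherwise just pin the range
 */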
3276 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3277                           struct btrfs_root *root,
3278                           struct btrfs_path *path,
3279                           u64 bytenr, u64 num_bytes,
3280                           int is_data, int reserved,
3281                           struct extent_buffer **must_clean)
3282 {
3283         int err = 0;
3284         struct extent_buffer *buf;
3285
3286         if (is_data)
3287                 goto pinit;
3288
3289         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3290         if (!buf)
3291                 goto pinit;
3292
3293         /* we can reuse a block if it hasn't been written
3294          * and it is from this transaction.  We can't
3295          * reuse anything from the tree log root because
3296          * it has tiny sub-transactions.
3297          */
3298         if (btrfs_buffer_uptodate(buf, 0) &&
3299             btrfs_try_tree_lock(buf)) {
3300                 u64 header_owner = btrfs_header_owner(buf);
3301                 u64 header_transid = btrfs_header_generation(buf);
3302                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3303                     header_transid == trans->transid &&
3304                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3305                         *must_clean = buf;
3306                         return 1;
3307                 }
3308                 btrfs_tree_unlock(buf);
3309         }
3310         free_extent_buffer(buf);
3311 pinit:
3312         if (path)
3313                 btrfs_set_path_blocking(path);
3314         /* unlocks the pinned mutex */
3315         btrfs_pin_extent(root, bytenr, num_bytes, reserved);
3316
3317         BUG_ON(err < 0);
3318         return 0;
3319 }
3320
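/*
 * drop refs_to_drop references from an extent.  The backref and the extent
 * item are looked up, the reference count is decremented, and once it reaches
 * zero the items are deleted, the bytes are pinned (or reclaimed if the block
 * was never written), csums are removed for data extents and the block group
 * accounting is updated
 */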
3321 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3322                                 struct btrfs_root *root,
3323                                 u64 bytenr, u64 num_bytes, u64 parent,
3324                                 u64 root_objectid, u64 owner_objectid,
3325                                 u64 owner_offset, int refs_to_drop,
3326                                 struct btrfs_delayed_extent_op *extent_op)
3327 {
3328         struct btrfs_key key;
3329         struct btrfs_path *path;
3330         struct btrfs_fs_info *info = root->fs_info;
3331         struct btrfs_root *extent_root = info->extent_root;
3332         struct extent_buffer *leaf;
3333         struct btrfs_extent_item *ei;
3334         struct btrfs_extent_inline_ref *iref;
3335         int ret;
3336         int is_data;
3337         int extent_slot = 0;
3338         int found_extent = 0;
3339         int num_to_del = 1;
3340         u32 item_size;
3341         u64 refs;
3342
3343         path = btrfs_alloc_path();
3344         if (!path)
3345                 return -ENOMEM;
3346
3347         path->reada = 1;
3348         path->leave_spinning = 1;
3349
3350         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3351         BUG_ON(!is_data && refs_to_drop != 1);
3352
3353         ret = lookup_extent_backref(trans, extent_root, path, &iref,
3354                                     bytenr, num_bytes, parent,
3355                                     root_objectid, owner_objectid,
3356                                     owner_offset);
3357         if (ret == 0) {
3358                 extent_slot = path->slots[0];
3359                 while (extent_slot >= 0) {
3360                         btrfs_item_key_to_cpu(path->nodes[0], &key,
3361                                               extent_slot);
3362                         if (key.objectid != bytenr)
3363                                 break;
3364                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3365                             key.offset == num_bytes) {
3366                                 found_extent = 1;
3367                                 break;
3368                         }
3369                         if (path->slots[0] - extent_slot > 5)
3370                                 break;
3371                         extent_slot--;
3372                 }
3373 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3374                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3375                 if (found_extent && item_size < sizeof(*ei))
3376                         found_extent = 0;
3377 #endif
3378                 if (!found_extent) {
3379                         BUG_ON(iref);
3380                         ret = remove_extent_backref(trans, extent_root, path,
3381                                                     NULL, refs_to_drop,
3382                                                     is_data);
3383                         BUG_ON(ret);
3384                         btrfs_release_path(extent_root, path);
3385                         path->leave_spinning = 1;
3386
3387                         key.objectid = bytenr;
3388                         key.type = BTRFS_EXTENT_ITEM_KEY;
3389                         key.offset = num_bytes;
3390
3391                         ret = btrfs_search_slot(trans, extent_root,
3392                                                 &key, path, -1, 1);
3393                         if (ret) {
3394                                 printk(KERN_ERR "umm, got %d back from search"
3395                                        ", was looking for %llu\n", ret,
3396                                        (unsigned long long)bytenr);
3397                                 btrfs_print_leaf(extent_root, path->nodes[0]);
3398                         }
3399                         BUG_ON(ret);
3400                         extent_slot = path->slots[0];
3401                 }
3402         } else {
3403                 btrfs_print_leaf(extent_root, path->nodes[0]);
3404                 WARN_ON(1);
3405                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3406                        "parent %llu root %llu  owner %llu offset %llu\n",
3407                        (unsigned long long)bytenr,
3408                        (unsigned long long)parent,
3409                        (unsigned long long)root_objectid,
3410                        (unsigned long long)owner_objectid,
3411                        (unsigned long long)owner_offset);
3412         }
3413
3414         leaf = path->nodes[0];
3415         item_size = btrfs_item_size_nr(leaf, extent_slot);
3416 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3417         if (item_size < sizeof(*ei)) {
3418                 BUG_ON(found_extent || extent_slot != path->slots[0]);
3419                 ret = convert_extent_item_v0(trans, extent_root, path,
3420                                              owner_objectid, 0);
3421                 BUG_ON(ret < 0);
3422
3423                 btrfs_release_path(extent_root, path);
3424                 path->leave_spinning = 1;
3425
3426                 key.objectid = bytenr;
3427                 key.type = BTRFS_EXTENT_ITEM_KEY;
3428                 key.offset = num_bytes;
3429
3430                 ret = btrfs_search_slot(trans, extent_root, &key, path,
3431                                         -1, 1);
3432                 if (ret) {
3433                         printk(KERN_ERR "umm, got %d back from search"
3434                                ", was looking for %llu\n", ret,
3435                                (unsigned long long)bytenr);
3436                         btrfs_print_leaf(extent_root, path->nodes[0]);
3437                 }
3438                 BUG_ON(ret);
3439                 extent_slot = path->slots[0];
3440                 leaf = path->nodes[0];
3441                 item_size = btrfs_item_size_nr(leaf, extent_slot);
3442         }
3443 #endif
3444         BUG_ON(item_size < sizeof(*ei));
3445         ei = btrfs_item_ptr(leaf, extent_slot,
3446                             struct btrfs_extent_item);
3447         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3448                 struct btrfs_tree_block_info *bi;
3449                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3450                 bi = (struct btrfs_tree_block_info *)(ei + 1);
3451                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3452         }
3453
3454         refs = btrfs_extent_refs(leaf, ei);
3455         BUG_ON(refs < refs_to_drop);
3456         refs -= refs_to_drop;
3457
3458         if (refs > 0) {
3459                 if (extent_op)
3460                         __run_delayed_extent_op(extent_op, leaf, ei);
3461                 /*
3462                  * In the case of inline back ref, reference count will
3463                  * be updated by remove_extent_backref
3464                  */
3465                 if (iref) {
3466                         BUG_ON(!found_extent);
3467                 } else {
3468                         btrfs_set_extent_refs(leaf, ei, refs);
3469                         btrfs_mark_buffer_dirty(leaf);
3470                 }
3471                 if (found_extent) {
3472                         ret = remove_extent_backref(trans, extent_root, path,
3473                                                     iref, refs_to_drop,
3474                                                     is_data);
3475                         BUG_ON(ret);
3476                 }
3477         } else {
3478                 int mark_free = 0;
3479                 struct extent_buffer *must_clean = NULL;
3480
3481                 if (found_extent) {
3482                         BUG_ON(is_data && refs_to_drop !=
3483                                extent_data_ref_count(root, path, iref));
3484                         if (iref) {
3485                                 BUG_ON(path->slots[0] != extent_slot);
3486                         } else {
3487                                 BUG_ON(path->slots[0] != extent_slot + 1);
3488                                 path->slots[0] = extent_slot;
3489                                 num_to_del = 2;
3490                         }
3491                 }
3492
3493                 ret = pin_down_bytes(trans, root, path, bytenr,
3494                                      num_bytes, is_data, 0, &must_clean);
3495                 if (ret > 0)
3496                         mark_free = 1;
3497                 BUG_ON(ret < 0);
3498                 /*
3499                  * it is going to be very rare for someone to be waiting
3500                  * on the block we're freeing.  del_items might need to
3501                  * schedule, so rather than get fancy, just force it
3502                  * to blocking here
3503                  */
3504                 if (must_clean)
3505                         btrfs_set_lock_blocking(must_clean);
3506
3507                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3508                                       num_to_del);
3509                 BUG_ON(ret);
3510                 btrfs_release_path(extent_root, path);
3511
3512                 if (must_clean) {
3513                         clean_tree_block(NULL, root, must_clean);
3514                         btrfs_tree_unlock(must_clean);
3515                         free_extent_buffer(must_clean);
3516                 }
3517
3518                 if (is_data) {
3519                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3520                         BUG_ON(ret);
3521                 } else {
3522                         invalidate_mapping_pages(info->btree_inode->i_mapping,
3523                              bytenr >> PAGE_CACHE_SHIFT,
3524                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3525                 }
3526
3527                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3528                                          mark_free);
3529                 BUG_ON(ret);
3530         }
3531         btrfs_free_path(path);
3532         return ret;
3533 }
3534
3535 /*
3536  * when we free an extent, it is possible (and likely) that we free the last
3537  * delayed ref for that extent as well.  This searches the delayed ref tree for
3538  * a given extent, and if there are no other delayed refs to be processed, it
3539  * removes it from the tree.
3540  */
3541 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3542                                       struct btrfs_root *root, u64 bytenr)
3543 {
3544         struct btrfs_delayed_ref_head *head;
3545         struct btrfs_delayed_ref_root *delayed_refs;
3546         struct btrfs_delayed_ref_node *ref;
3547         struct rb_node *node;
3548         int ret;
3549
3550         delayed_refs = &trans->transaction->delayed_refs;
3551         spin_lock(&delayed_refs->lock);
3552         head = btrfs_find_delayed_ref_head(trans, bytenr);
3553         if (!head)
3554                 goto out;
3555
3556         node = rb_prev(&head->node.rb_node);
3557         if (!node)
3558                 goto out;
3559
3560         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3561
3562         /* there are still entries for this ref, we can't drop it */
3563         if (ref->bytenr == bytenr)
3564                 goto out;
3565
3566         if (head->extent_op) {
3567                 if (!head->must_insert_reserved)
3568                         goto out;
3569                 kfree(head->extent_op);
3570                 head->extent_op = NULL;
3571         }
3572
3573         /*
3574          * waiting for the lock here would deadlock.  If someone else has it
3575  * locked, they are already in the process of dropping it anyway
3576          */
3577         if (!mutex_trylock(&head->mutex))
3578                 goto out;
3579
3580         /*
3581          * at this point we have a head with no other entries.  Go
3582          * ahead and process it.
3583          */
3584         head->node.in_tree = 0;
3585         rb_erase(&head->node.rb_node, &delayed_refs->root);
3586
3587         delayed_refs->num_entries--;
3588
3589         /*
3590          * we don't take a ref on the node because we're removing it from the
3591          * tree, so we just steal the ref the tree was holding.
3592          */
3593         delayed_refs->num_heads--;
3594         if (list_empty(&head->cluster))
3595                 delayed_refs->num_heads_ready--;
3596
3597         list_del_init(&head->cluster);
3598         spin_unlock(&delayed_refs->lock);
3599
3600         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
3601                                   &head->node, head->extent_op,
3602                                   head->must_insert_reserved);
3603         BUG_ON(ret);
3604         btrfs_put_delayed_ref(&head->node);
3605         return 0;
3606 out:
3607         spin_unlock(&delayed_refs->lock);
3608         return 0;
3609 }
3610
3611 int btrfs_free_extent(struct btrfs_trans_handle *trans,
3612                       struct btrfs_root *root,
3613                       u64 bytenr, u64 num_bytes, u64 parent,
3614                       u64 root_objectid, u64 owner, u64 offset)
3615 {
3616         int ret;
3617
3618         /*
3619          * tree log blocks never actually go into the extent allocation
3620          * tree, just update pinning info and exit early.
3621          */
3622         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
3623                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
3624                 /* unlocks the pinned mutex */
3625                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
3626                 ret = 0;
3627         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
3628                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
3629                                         parent, root_objectid, (int)owner,
3630                                         BTRFS_DROP_DELAYED_REF, NULL);
3631                 BUG_ON(ret);
3632                 ret = check_ref_cleanup(trans, root, bytenr);
3633                 BUG_ON(ret);
3634         } else {
3635                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3636                                         parent, root_objectid, owner,
3637                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
3638                 BUG_ON(ret);
3639         }
3640         return ret;
3641 }
3642
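/*
 * round val up to the next stripe boundary; e.g. with a 64k stripe size a
 * value of 70000 would become 131072
 */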
3643 static u64 stripe_align(struct btrfs_root *root, u64 val)
3644 {
3645         u64 mask = ((u64)root->stripesize - 1);
3646         u64 ret = (val + mask) & ~mask;
3647         return ret;
3648 }
3649
3650 /*
3651  * when we wait for progress in the block group caching, it's because
3652  * our allocation attempt failed at least once.  So, we must sleep
3653  * and let some progress happen before we try again.
3654  *
3655  * This function will sleep at least once waiting for new free space to
3656  * show up, and then it will check the block group free space numbers
3657  * for our min num_bytes.  Another option is to have it go ahead
3658  * and look in the rbtree for a free extent of a given size, but this
3659  * is a good start.
3660  */
3661 static noinline int
3662 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3663                                 u64 num_bytes)
3664 {
3665         struct btrfs_caching_control *caching_ctl;
3666         DEFINE_WAIT(wait);
3667
3668         caching_ctl = get_caching_control(cache);
3669         if (!caching_ctl)
3670                 return 0;
3671
3672         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
3673                    (cache->free_space >= num_bytes));
3674
3675         put_caching_control(caching_ctl);
3676         return 0;
3677 }
3678
3679 static noinline int
3680 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
3681 {
3682         struct btrfs_caching_control *caching_ctl;
3683         DEFINE_WAIT(wait);
3684
3685         caching_ctl = get_caching_control(cache);
3686         if (!caching_ctl)
3687                 return 0;
3688
3689         wait_event(caching_ctl->wait, block_group_cache_done(cache));
3690
3691         put_caching_control(caching_ctl);
3692         return 0;
3693 }
3694
3695 enum btrfs_loop_type {
3696         LOOP_CACHED_ONLY = 0,
3697         LOOP_CACHING_NOWAIT = 1,
3698         LOOP_CACHING_WAIT = 2,
3699         LOOP_ALLOC_CHUNK = 3,
3700         LOOP_NO_EMPTY_SIZE = 4,
3701 };
3702
3703 /*
3704  * walks the btree of allocated extents and finds a hole of a given size.
3705  * The key ins is changed to record the hole:
3706  * ins->objectid == block start
3707  * ins->flags = BTRFS_EXTENT_ITEM_KEY
3708  * ins->offset == number of blocks
3709  * Any available blocks before search_start are skipped.
3710  */
3711 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3712                                      struct btrfs_root *orig_root,
3713                                      u64 num_bytes, u64 empty_size,
3714                                      u64 search_start, u64 search_end,
3715                                      u64 hint_byte, struct btrfs_key *ins,
3716                                      u64 exclude_start, u64 exclude_nr,
3717                                      int data)
3718 {
3719         int ret = 0;
3720         struct btrfs_root *root = orig_root->fs_info->extent_root;
3721         struct btrfs_free_cluster *last_ptr = NULL;
3722         struct btrfs_block_group_cache *block_group = NULL;
3723         int empty_cluster = 2 * 1024 * 1024;
3724         int allowed_chunk_alloc = 0;
3725         struct btrfs_space_info *space_info;
3726         int last_ptr_loop = 0;
3727         int loop = 0;
3728         bool found_uncached_bg = false;
3729         bool failed_cluster_refill = false;
3730
3731         WARN_ON(num_bytes < root->sectorsize);
3732         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
3733         ins->objectid = 0;
3734         ins->offset = 0;
3735
3736         space_info = __find_space_info(root->fs_info, data);
3737
3738         if (orig_root->ref_cows || empty_size)
3739                 allowed_chunk_alloc = 1;
3740
3741         if (data & BTRFS_BLOCK_GROUP_METADATA) {
3742                 last_ptr = &root->fs_info->meta_alloc_cluster;
3743                 if (!btrfs_test_opt(root, SSD))
3744                         empty_cluster = 64 * 1024;
3745         }
3746
3747         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
3748                 last_ptr = &root->fs_info->data_alloc_cluster;
3749         }
3750
3751         if (last_ptr) {
3752                 spin_lock(&last_ptr->lock);
3753                 if (last_ptr->block_group)
3754                         hint_byte = last_ptr->window_start;
3755                 spin_unlock(&last_ptr->lock);
3756         }
3757
3758         search_start = max(search_start, first_logical_byte(root, 0));
3759         search_start = max(search_start, hint_byte);
3760
3761         if (!last_ptr)
3762                 empty_cluster = 0;
3763
3764         if (search_start == hint_byte) {
3765                 block_group = btrfs_lookup_block_group(root->fs_info,
3766                                                        search_start);
3767                 /*
3768                  * we don't want to use the block group if it doesn't match our
3769                  * allocation bits, or if it's not cached.
3770                  */
3771                 if (block_group && block_group_bits(block_group, data) &&
3772                     block_group_cache_done(block_group)) {
3773                         down_read(&space_info->groups_sem);
3774                         if (list_empty(&block_group->list) ||
3775                             block_group->ro) {
3776                                 /*
3777                                  * someone is removing this block group,
3778                                  * we can't jump into the have_block_group
3779                                  * target because our list pointers are not
3780                                  * valid
3781                                  */
3782                                 btrfs_put_block_group(block_group);
3783                                 up_read(&space_info->groups_sem);
3784                         } else
3785                                 goto have_block_group;
3786                 } else if (block_group) {
3787                         btrfs_put_block_group(block_group);
3788                 }
3789         }
3790
3791 search:
3792         down_read(&space_info->groups_sem);
3793         list_for_each_entry(block_group, &space_info->block_groups, list) {
3794                 u64 offset;
3795                 int cached;
3796
3797                 atomic_inc(&block_group->count);
3798                 search_start = block_group->key.objectid;
3799
3800 have_block_group:
3801                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3802                         /*
3803                          * we want to start caching kthreads, but not too many
3804                          * right off the bat so we don't overwhelm the system,
3805                          * so only start them if there are fewer than 2 and we're
3806                          * in the initial allocation phase.
3807                          */
3808                         if (loop > LOOP_CACHING_NOWAIT ||
3809                             atomic_read(&space_info->caching_threads) < 2) {
3810                                 ret = cache_block_group(block_group);
3811                                 BUG_ON(ret);
3812                         }
3813                 }
3814
3815                 cached = block_group_cache_done(block_group);
3816                 if (unlikely(!cached)) {
3817                         found_uncached_bg = true;
3818
3819                         /* if we only want cached bgs, loop */
3820                         if (loop == LOOP_CACHED_ONLY)
3821                                 goto loop;
3822                 }
3823
3824                 if (unlikely(block_group->ro))
3825                         goto loop;
3826
3827                 /*
3828                  * Ok, we want to try and use the cluster allocator, so let's look
3829                  * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
3830                  * have tried the cluster allocator plenty of times at this
3831                  * point and not have found anything, so we are likely way too
3832                  * fragmented for the clustering stuff to find anything, so let's
3833                  * just skip it and let the allocator find whatever block it can
3834                  * find
3835                  */
3836                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
3837                         /*
3838                          * the refill lock keeps out other
3839                          * people trying to start a new cluster
3840                          */
3841                         spin_lock(&last_ptr->refill_lock);
3842                         if (last_ptr->block_group &&
3843                             (last_ptr->block_group->ro ||
3844                             !block_group_bits(last_ptr->block_group, data))) {
3845                                 offset = 0;
3846                                 goto refill_cluster;
3847                         }
3848
3849                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
3850                                                  num_bytes, search_start);
3851                         if (offset) {
3852                                 /* we have a block, we're done */
3853                                 spin_unlock(&last_ptr->refill_lock);
3854                                 goto checks;
3855                         }
3856
3857                         spin_lock(&last_ptr->lock);
3858                         /*
3859                          * whoops, this cluster doesn't actually point to
3860                          * this block group.  Get a ref on the block
3861                          * group it does point to and try again
3862                          */
3863                         if (!last_ptr_loop && last_ptr->block_group &&
3864                             last_ptr->block_group != block_group) {
3865
3866                                 btrfs_put_block_group(block_group);
3867                                 block_group = last_ptr->block_group;
3868                                 atomic_inc(&block_group->count);
3869                                 spin_unlock(&last_ptr->lock);
3870                                 spin_unlock(&last_ptr->refill_lock);
3871
3872                                 last_ptr_loop = 1;
3873                                 search_start = block_group->key.objectid;
3874                                 /*
3875                                  * we know this block group is properly
3876                                  * in the list because
3877                                  * btrfs_remove_block_group drops the
3878                                  * cluster before it removes the block
3879                                  * group from the list
3880                                  */
3881                                 goto have_block_group;
3882                         }
3883                         spin_unlock(&last_ptr->lock);
3884 refill_cluster:
3885                         /*
3886                          * this cluster didn't work out, free it and
3887                          * start over
3888                          */
3889                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
3890
3891                         last_ptr_loop = 0;
3892
3893                         /* allocate a cluster in this block group */
3894                         ret = btrfs_find_space_cluster(trans, root,
3895                                                block_group, last_ptr,
3896                                                offset, num_bytes,
3897                                                empty_cluster + empty_size);
3898                         if (ret == 0) {
3899                                 /*
3900                                  * now pull our allocation out of this
3901                                  * cluster
3902                                  */
3903                                 offset = btrfs_alloc_from_cluster(block_group,
3904                                                   last_ptr, num_bytes,
3905                                                   search_start);
3906                                 if (offset) {
3907                                         /* we found one, proceed */
3908                                         spin_unlock(&last_ptr->refill_lock);
3909                                         goto checks;
3910                                 }
3911                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
3912                                    && !failed_cluster_refill) {
3913                                 spin_unlock(&last_ptr->refill_lock);
3914
3915                                 failed_cluster_refill = true;
3916                                 wait_block_group_cache_progress(block_group,
3917                                        num_bytes + empty_cluster + empty_size);
3918                                 goto have_block_group;
3919                         }
3920
3921                         /*
3922                          * at this point we either didn't find a cluster
3923                          * or we weren't able to allocate a block from our
3924                          * cluster.  Free the cluster we've been trying
3925                          * to use, and go to the next block group
3926                          */
3927                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
3928                         spin_unlock(&last_ptr->refill_lock);
3929                         goto loop;
3930                 }
3931
3932                 offset = btrfs_find_space_for_alloc(block_group, search_start,
3933                                                     num_bytes, empty_size);
3934                 if (!offset && (cached || (!cached &&
3935                                            loop == LOOP_CACHING_NOWAIT))) {
3936                         goto loop;
3937                 } else if (!offset && (!cached &&
3938                                        loop > LOOP_CACHING_NOWAIT)) {
3939                         wait_block_group_cache_progress(block_group,
3940                                         num_bytes + empty_size);
3941                         goto have_block_group;
3942                 }
3943 checks:
3944                 search_start = stripe_align(root, offset);
3945                 /* move on to the next group */
3946                 if (search_start + num_bytes >= search_end) {
3947                         btrfs_add_free_space(block_group, offset, num_bytes);
3948                         goto loop;
3949                 }
3950
3951                 /* move on to the next group */
3952                 if (search_start + num_bytes >
3953                     block_group->key.objectid + block_group->key.offset) {
3954                         btrfs_add_free_space(block_group, offset, num_bytes);
3955                         goto loop;
3956                 }
3957
3958                 if (exclude_nr > 0 &&
3959                     (search_start + num_bytes > exclude_start &&
3960                      search_start < exclude_start + exclude_nr)) {
3961                         search_start = exclude_start + exclude_nr;
3962
3963                         btrfs_add_free_space(block_group, offset, num_bytes);
3964                         /*
3965                          * if search_start is still in this block group
3966                          * then we just re-search this block group
3967                          */
3968                         if (search_start >= block_group->key.objectid &&
3969                             search_start < (block_group->key.objectid +
3970                                             block_group->key.offset))
3971                                 goto have_block_group;
3972                         goto loop;
3973                 }
3974
3975                 ins->objectid = search_start;
3976                 ins->offset = num_bytes;
3977
3978                 if (offset < search_start)
3979                         btrfs_add_free_space(block_group, offset,
3980                                              search_start - offset);
3981                 BUG_ON(offset > search_start);
3982
3983                 update_reserved_extents(block_group, num_bytes, 1);
3984
3985                 /* we are all good, lets return */
3986                 break;
3987 loop:
3988                 failed_cluster_refill = false;
3989                 btrfs_put_block_group(block_group);
3990         }
3991         up_read(&space_info->groups_sem);
3992
3993         /* LOOP_CACHED_ONLY, only search fully cached block groups
3994          * LOOP_CACHING_NOWAIT, search partially cached block groups, but
3995          *                      don't wait for them to finish caching
3996          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3997          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3998          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3999          *                      again
4000          */
4001         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4002             (found_uncached_bg || empty_size || empty_cluster ||
4003              allowed_chunk_alloc)) {
4004                 if (found_uncached_bg) {
4005                         found_uncached_bg = false;
4006                         if (loop < LOOP_CACHING_WAIT) {
4007                                 loop++;
4008                                 goto search;
4009                         }
4010                 }
4011
4012                 if (loop == LOOP_ALLOC_CHUNK) {
4013                         empty_size = 0;
4014                         empty_cluster = 0;
4015                 }
4016
4017                 if (allowed_chunk_alloc) {
4018                         ret = do_chunk_alloc(trans, root, num_bytes +
4019                                              2 * 1024 * 1024, data, 1);
4020                         allowed_chunk_alloc = 0;
4021                 } else {
4022                         space_info->force_alloc = 1;
4023                 }
4024
4025                 if (loop < LOOP_NO_EMPTY_SIZE) {
4026                         loop++;
4027                         goto search;
4028                 }
4029                 ret = -ENOSPC;
4030         } else if (!ins->objectid) {
4031                 ret = -ENOSPC;
4032         }
4033
4034         /* we found what we needed */
4035         if (ins->objectid) {
4036                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
4037                         trans->block_group = block_group->key.objectid;
4038
4039                 btrfs_put_block_group(block_group);
4040                 ret = 0;
4041         }
4042
4043         return ret;
4044 }
4045
4046 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
4047 {
4048         struct btrfs_block_group_cache *cache;
4049
4050         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4051                (unsigned long long)(info->total_bytes - info->bytes_used -
4052                                     info->bytes_pinned - info->bytes_reserved),
4053                (info->full) ? "" : "not ");
4054         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4055                " may_use=%llu, used=%llu\n",
4056                (unsigned long long)info->total_bytes,
4057                (unsigned long long)info->bytes_pinned,
4058                (unsigned long long)info->bytes_delalloc,
4059                (unsigned long long)info->bytes_may_use,
4060                (unsigned long long)info->bytes_used);
4061
4062         down_read(&info->groups_sem);
4063         list_for_each_entry(cache, &info->block_groups, list) {
4064                 spin_lock(&cache->lock);
4065                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4066                        "%llu pinned %llu reserved\n",
4067                        (unsigned long long)cache->key.objectid,
4068                        (unsigned long long)cache->key.offset,
4069                        (unsigned long long)btrfs_block_group_used(&cache->item),
4070                        (unsigned long long)cache->pinned,
4071                        (unsigned long long)cache->reserved);
4072                 btrfs_dump_free_space(cache, bytes);
4073                 spin_unlock(&cache->lock);
4074         }
4075         up_read(&info->groups_sem);
4076 }
4077
4078 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4079                          struct btrfs_root *root,
4080                          u64 num_bytes, u64 min_alloc_size,
4081                          u64 empty_size, u64 hint_byte,
4082                          u64 search_end, struct btrfs_key *ins,
4083                          u64 data)
4084 {
4085         int ret;
4086         u64 search_start = 0;
4087         struct btrfs_fs_info *info = root->fs_info;
4088
4089         data = btrfs_get_alloc_profile(root, data);
4090 again:
4091         /*
4092          * the only place that sets empty_size is btrfs_realloc_node, which
4093          * is not called recursively on allocations
4094          */
4095         if (empty_size || root->ref_cows) {
4096                 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
4097                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4098                                      2 * 1024 * 1024,
4099                                      BTRFS_BLOCK_GROUP_METADATA |
4100                                      (info->metadata_alloc_profile &
4101                                       info->avail_metadata_alloc_bits), 0);
4102                 }
4103                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4104                                      num_bytes + 2 * 1024 * 1024, data, 0);
4105         }
4106
4107         WARN_ON(num_bytes < root->sectorsize);
4108         ret = find_free_extent(trans, root, num_bytes, empty_size,
4109                                search_start, search_end, hint_byte, ins,
4110                                trans->alloc_exclude_start,
4111                                trans->alloc_exclude_nr, data);
4112
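        /*
         * on ENOSPC, keep halving the request (rounded down to a sector
         * boundary, but never below min_alloc_size) and force a chunk
         * allocation before retrying, so large allocations can fall back
         * to smaller ones instead of failing outright.
         */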
4113         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4114                 num_bytes = num_bytes >> 1;
4115                 num_bytes = num_bytes & ~(root->sectorsize - 1);
4116                 num_bytes = max(num_bytes, min_alloc_size);
4117                 do_chunk_alloc(trans, root->fs_info->extent_root,
4118                                num_bytes, data, 1);
4119                 goto again;
4120         }
4121         if (ret == -ENOSPC) {
4122                 struct btrfs_space_info *sinfo;
4123
4124                 sinfo = __find_space_info(root->fs_info, data);
4125                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4126                        "wanted %llu\n", (unsigned long long)data,
4127                        (unsigned long long)num_bytes);
4128                 dump_space_info(sinfo, num_bytes);
4129         }
4130
4131         return ret;
4132 }
4133
4134 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4135 {
4136         struct btrfs_block_group_cache *cache;
4137         int ret = 0;
4138
4139         cache = btrfs_lookup_block_group(root->fs_info, start);
4140         if (!cache) {
4141                 printk(KERN_ERR "Unable to find block group for %llu\n",
4142                        (unsigned long long)start);
4143                 return -ENOSPC;
4144         }
4145
4146         ret = btrfs_discard_extent(root, start, len);
4147
4148         btrfs_add_free_space(cache, start, len);
4149         update_reserved_extents(cache, len, 0);
4150         btrfs_put_block_group(cache);
4151
4152         return ret;
4153 }
4154
4155 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4156                                       struct btrfs_root *root,
4157                                       u64 parent, u64 root_objectid,
4158                                       u64 flags, u64 owner, u64 offset,
4159                                       struct btrfs_key *ins, int ref_mod)
4160 {
4161         int ret;
4162         struct btrfs_fs_info *fs_info = root->fs_info;
4163         struct btrfs_extent_item *extent_item;
4164         struct btrfs_extent_inline_ref *iref;
4165         struct btrfs_path *path;
4166         struct extent_buffer *leaf;
4167         int type;
4168         u32 size;
4169
4170         if (parent > 0)
4171                 type = BTRFS_SHARED_DATA_REF_KEY;
4172         else
4173                 type = BTRFS_EXTENT_DATA_REF_KEY;
4174
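        /*
         * the new extent item is followed immediately by a single inline
         * backref: a shared data ref keyed by the parent block when one is
         * given, otherwise an extent data ref keyed by root/owner/offset.
         */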
4175         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4176
4177         path = btrfs_alloc_path();
4178         BUG_ON(!path);
4179
4180         path->leave_spinning = 1;
4181         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4182                                       ins, size);
4183         BUG_ON(ret);
4184
4185         leaf = path->nodes[0];
4186         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4187                                      struct btrfs_extent_item);
4188         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4189         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4190         btrfs_set_extent_flags(leaf, extent_item,
4191                                flags | BTRFS_EXTENT_FLAG_DATA);
4192
4193         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4194         btrfs_set_extent_inline_ref_type(leaf, iref, type);
4195         if (parent > 0) {
4196                 struct btrfs_shared_data_ref *ref;
4197                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4198                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4199                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4200         } else {
4201                 struct btrfs_extent_data_ref *ref;
4202                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4203                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4204                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4205                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4206                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4207         }
4208
4209         btrfs_mark_buffer_dirty(path->nodes[0]);
4210         btrfs_free_path(path);
4211
4212         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4213                                  1, 0);
4214         if (ret) {
4215                 printk(KERN_ERR "btrfs update block group failed for %llu "
4216                        "%llu\n", (unsigned long long)ins->objectid,
4217                        (unsigned long long)ins->offset);
4218                 BUG();
4219         }
4220         return ret;
4221 }
4222
4223 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4224                                      struct btrfs_root *root,
4225                                      u64 parent, u64 root_objectid,
4226                                      u64 flags, struct btrfs_disk_key *key,
4227                                      int level, struct btrfs_key *ins)
4228 {
4229         int ret;
4230         struct btrfs_fs_info *fs_info = root->fs_info;
4231         struct btrfs_extent_item *extent_item;
4232         struct btrfs_tree_block_info *block_info;
4233         struct btrfs_extent_inline_ref *iref;
4234         struct btrfs_path *path;
4235         struct extent_buffer *leaf;
4236         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
4237
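        /*
         * tree block extent items carry a btrfs_tree_block_info (key and
         * level) between the extent item and the single inline backref,
         * hence the three-part size above.
         */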
4238         path = btrfs_alloc_path();
4239         BUG_ON(!path);
4240
4241         path->leave_spinning = 1;
4242         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4243                                       ins, size);
4244         BUG_ON(ret);
4245
4246         leaf = path->nodes[0];
4247         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4248                                      struct btrfs_extent_item);
4249         btrfs_set_extent_refs(leaf, extent_item, 1);
4250         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4251         btrfs_set_extent_flags(leaf, extent_item,
4252                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4253         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4254
4255         btrfs_set_tree_block_key(leaf, block_info, key);
4256         btrfs_set_tree_block_level(leaf, block_info, level);
4257
4258         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4259         if (parent > 0) {
4260                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4261                 btrfs_set_extent_inline_ref_type(leaf, iref,
4262                                                  BTRFS_SHARED_BLOCK_REF_KEY);
4263                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4264         } else {
4265                 btrfs_set_extent_inline_ref_type(leaf, iref,
4266                                                  BTRFS_TREE_BLOCK_REF_KEY);
4267                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4268         }
4269
4270         btrfs_mark_buffer_dirty(leaf);
4271         btrfs_free_path(path);
4272
4273         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4274                                  1, 0);
4275         if (ret) {
4276                 printk(KERN_ERR "btrfs update block group failed for %llu "
4277                        "%llu\n", (unsigned long long)ins->objectid,
4278                        (unsigned long long)ins->offset);
4279                 BUG();
4280         }
4281         return ret;
4282 }
4283
4284 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4285                                      struct btrfs_root *root,
4286                                      u64 root_objectid, u64 owner,
4287                                      u64 offset, struct btrfs_key *ins)
4288 {
4289         int ret;
4290
4291         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4292
4293         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4294                                          0, root_objectid, owner, offset,
4295                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
4296         return ret;
4297 }
4298
4299 /*
4300  * this is used by the tree logging recovery code.  It records that
4301  * an extent has been allocated and makes sure to clear the free
4302  * space cache bits as well
4303  */
4304 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4305                                    struct btrfs_root *root,
4306                                    u64 root_objectid, u64 owner, u64 offset,
4307                                    struct btrfs_key *ins)
4308 {
4309         int ret;
4310         struct btrfs_block_group_cache *block_group;
4311         struct btrfs_caching_control *caching_ctl;
4312         u64 start = ins->objectid;
4313         u64 num_bytes = ins->offset;
4314
4315         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4316         cache_block_group(block_group);
4317         caching_ctl = get_caching_control(block_group);
4318
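        /*
         * if the block group is fully cached, simply remove the range from
         * the free space cache.  otherwise exclude whatever part of the
         * range the caching thread has not reached yet, so it is not
         * re-added to free space once caching catches up.
         */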
4319         if (!caching_ctl) {
4320                 BUG_ON(!block_group_cache_done(block_group));
4321                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
4322                 BUG_ON(ret);
4323         } else {
4324                 mutex_lock(&caching_ctl->mutex);
4325
4326                 if (start >= caching_ctl->progress) {
4327                         ret = add_excluded_extent(root, start, num_bytes);
4328                         BUG_ON(ret);
4329                 } else if (start + num_bytes <= caching_ctl->progress) {
4330                         ret = btrfs_remove_free_space(block_group,
4331                                                       start, num_bytes);
4332                         BUG_ON(ret);
4333                 } else {
4334                         num_bytes = caching_ctl->progress - start;
4335                         ret = btrfs_remove_free_space(block_group,
4336                                                       start, num_bytes);
4337                         BUG_ON(ret);
4338
4339                         start = caching_ctl->progress;
4340                         num_bytes = ins->objectid + ins->offset -
4341                                     caching_ctl->progress;
4342                         ret = add_excluded_extent(root, start, num_bytes);
4343                         BUG_ON(ret);
4344                 }
4345
4346                 mutex_unlock(&caching_ctl->mutex);
4347                 put_caching_control(caching_ctl);
4348         }
4349
4350         update_reserved_extents(block_group, ins->offset, 1);
4351         btrfs_put_block_group(block_group);
4352         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4353                                          0, owner, offset, ins, 1);
4354         return ret;
4355 }
4356
4357 /*
4358  * finds a free extent and does all the dirty work required for allocation.
4359  * returns the key for the extent through ins.
4361  *
4362  * returns 0 if everything worked, non-zero otherwise.
4363  */
4364 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4365                             struct btrfs_root *root,
4366                             u64 num_bytes, u64 parent, u64 root_objectid,
4367                             struct btrfs_disk_key *key, int level,
4368                             u64 empty_size, u64 hint_byte, u64 search_end,
4369                             struct btrfs_key *ins)
4370 {
4371         int ret;
4372         u64 flags = 0;
4373
4374         ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4375                                    empty_size, hint_byte, search_end,
4376                                    ins, 0);
4377         if (ret)
4378                 return ret;
4379
4380         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4381                 if (parent == 0)
4382                         parent = ins->objectid;
4383                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4384         } else
4385                 BUG_ON(parent > 0);
4386
4387         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4388                 struct btrfs_delayed_extent_op *extent_op;
4389                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4390                 BUG_ON(!extent_op);
4391                 if (key)
4392                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
4393                 else
4394                         memset(&extent_op->key, 0, sizeof(extent_op->key));
4395                 extent_op->flags_to_set = flags;
4396                 extent_op->update_key = 1;
4397                 extent_op->update_flags = 1;
4398                 extent_op->is_data = 0;
4399
4400                 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4401                                         ins->offset, parent, root_objectid,
4402                                         level, BTRFS_ADD_DELAYED_EXTENT,
4403                                         extent_op);
4404                 BUG_ON(ret);
4405         }
4406         return ret;
4407 }
4408
4409 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4410                                             struct btrfs_root *root,
4411                                             u64 bytenr, u32 blocksize,
4412                                             int level)
4413 {
4414         struct extent_buffer *buf;
4415
4416         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4417         if (!buf)
4418                 return ERR_PTR(-ENOMEM);
4419         btrfs_set_header_generation(buf, trans->transid);
4420         btrfs_set_buffer_lockdep_class(buf, level);
4421         btrfs_tree_lock(buf);
4422         clean_tree_block(trans, root, buf);
4423
4424         btrfs_set_lock_blocking(buf);
4425         btrfs_set_buffer_uptodate(buf);
4426
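        /*
         * log tree blocks are tracked in the root's dirty_log_pages and
         * everything else in the transaction's dirty_pages, so the log sync
         * and transaction commit code can each write back their own set.
         */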
4427         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4428                 set_extent_dirty(&root->dirty_log_pages, buf->start,
4429                          buf->start + buf->len - 1, GFP_NOFS);
4430         } else {
4431                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4432                          buf->start + buf->len - 1, GFP_NOFS);
4433         }
4434         trans->blocks_used++;
4435         /* this returns a buffer locked for blocking */
4436         return buf;
4437 }
4438
4439 /*
4440  * helper function to allocate a block for a given tree.
4441  * returns the tree buffer or an ERR_PTR on failure.
4442  */
4443 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4444                                         struct btrfs_root *root, u32 blocksize,
4445                                         u64 parent, u64 root_objectid,
4446                                         struct btrfs_disk_key *key, int level,
4447                                         u64 hint, u64 empty_size)
4448 {
4449         struct btrfs_key ins;
4450         int ret;
4451         struct extent_buffer *buf;
4452
4453         ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4454                                key, level, empty_size, hint, (u64)-1, &ins);
4455         if (ret) {
4456                 BUG_ON(ret > 0);
4457                 return ERR_PTR(ret);
4458         }
4459
4460         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4461                                     blocksize, level);
4462         return buf;
4463 }
4464
4465 struct walk_control {
4466         u64 refs[BTRFS_MAX_LEVEL];
4467         u64 flags[BTRFS_MAX_LEVEL];
4468         struct btrfs_key update_progress;
4469         int stage;
4470         int level;
4471         int shared_level;
4472         int update_ref;
4473         int keep_locks;
4474         int reada_slot;
4475         int reada_count;
4476 };
4477
4478 #define DROP_REFERENCE  1
4479 #define UPDATE_BACKREF  2
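
/*
 * The walk through the tree being dropped alternates between two stages:
 * in DROP_REFERENCE the walker frees blocks that are only referenced by
 * the tree and drops a single reference on shared blocks without
 * descending into them.  When a shared block's backrefs still need to be
 * updated (update_ref is set), do_walk_down() switches to UPDATE_BACKREF
 * for the subtree rooted at that block, and walk_up_proc() switches back
 * to DROP_REFERENCE once the walk returns to the shared level.
 */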
4480
4481 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4482                                      struct btrfs_root *root,
4483                                      struct walk_control *wc,
4484                                      struct btrfs_path *path)
4485 {
4486         u64 bytenr;
4487         u64 generation;
4488         u64 refs;
4489         u64 last = 0;
4490         u32 nritems;
4491         u32 blocksize;
4492         struct btrfs_key key;
4493         struct extent_buffer *eb;
4494         int ret;
4495         int slot;
4496         int nread = 0;
4497
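        /*
         * scale the readahead window: if we are still behind the slot where
         * the previous readahead pass stopped, shrink the window to 2/3 of
         * its size, otherwise grow it by 3/2, clamped between 2 and the
         * number of pointers per block.
         */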
4498         if (path->slots[wc->level] < wc->reada_slot) {
4499                 wc->reada_count = wc->reada_count * 2 / 3;
4500                 wc->reada_count = max(wc->reada_count, 2);
4501         } else {
4502                 wc->reada_count = wc->reada_count * 3 / 2;
4503                 wc->reada_count = min_t(int, wc->reada_count,
4504                                         BTRFS_NODEPTRS_PER_BLOCK(root));
4505         }
4506
4507         eb = path->nodes[wc->level];
4508         nritems = btrfs_header_nritems(eb);
4509         blocksize = btrfs_level_size(root, wc->level - 1);
4510
4511         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
4512                 if (nread >= wc->reada_count)
4513                         break;
4514
4515                 cond_resched();
4516                 bytenr = btrfs_node_blockptr(eb, slot);
4517                 generation = btrfs_node_ptr_generation(eb, slot);
4518
4519                 if (slot == path->slots[wc->level])
4520                         goto reada;
4521
4522                 if (wc->stage == UPDATE_BACKREF &&
4523                     generation <= root->root_key.offset)
4524                         continue;
4525
4526                 if (wc->stage == DROP_REFERENCE) {
4527                         ret = btrfs_lookup_extent_info(trans, root,
4528                                                 bytenr, blocksize,
4529                                                 &refs, NULL);
4530                         BUG_ON(ret);
4531                         BUG_ON(refs == 0);
4532                         if (refs == 1)
4533                                 goto reada;
4534
4535                         if (!wc->update_ref ||
4536                             generation <= root->root_key.offset)
4537                                 continue;
4538                         btrfs_node_key_to_cpu(eb, &key, slot);
4539                         ret = btrfs_comp_cpu_keys(&key,
4540                                                   &wc->update_progress);
4541                         if (ret < 0)
4542                                 continue;
4543                 }
4544 reada:
4545                 ret = readahead_tree_block(root, bytenr, blocksize,
4546                                            generation);
4547                 if (ret)
4548                         break;
4549                 last = bytenr + blocksize;
4550                 nread++;
4551         }
4552         wc->reada_slot = slot;
4553 }
4554
4555 /*
4556  * helper to process tree block while walking down the tree.
4557  *
4558  * when wc->stage == UPDATE_BACKREF, this function updates
4559  * back refs for pointers in the block.
4560  *
4561  * NOTE: return value 1 means we should stop walking down.
4562  */
4563 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4564                                    struct btrfs_root *root,
4565                                    struct btrfs_path *path,
4566                                    struct walk_control *wc)
4567 {
4568         int level = wc->level;
4569         struct extent_buffer *eb = path->nodes[level];
4570         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4571         int ret;
4572
4573         if (wc->stage == UPDATE_BACKREF &&
4574             btrfs_header_owner(eb) != root->root_key.objectid)
4575                 return 1;
4576
4577         /*
4578          * when the reference count of a tree block is 1, it won't increase
4579          * again. once the full backref flag is set, we never clear it.
4580          */
4581         if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4582             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4583                 BUG_ON(!path->locks[level]);
4584                 ret = btrfs_lookup_extent_info(trans, root,
4585                                                eb->start, eb->len,
4586                                                &wc->refs[level],
4587                                                &wc->flags[level]);
4588                 BUG_ON(ret);
4589                 BUG_ON(wc->refs[level] == 0);
4590         }
4591
4592         if (wc->stage == DROP_REFERENCE) {
4593                 if (wc->refs[level] > 1)
4594                         return 1;
4595
4596                 if (path->locks[level] && !wc->keep_locks) {
4597                         btrfs_tree_unlock(eb);
4598                         path->locks[level] = 0;
4599                 }
4600                 return 0;
4601         }
4602
4603         /* wc->stage == UPDATE_BACKREF */
4604         if (!(wc->flags[level] & flag)) {
4605                 BUG_ON(!path->locks[level]);
4606                 ret = btrfs_inc_ref(trans, root, eb, 1);
4607                 BUG_ON(ret);
4608                 ret = btrfs_dec_ref(trans, root, eb, 0);
4609                 BUG_ON(ret);
4610                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4611                                                   eb->len, flag, 0);
4612                 BUG_ON(ret);
4613                 wc->flags[level] |= flag;
4614         }
4615
4616         /*
4617          * the block is shared by multiple trees, so it's not good to
4618          * keep the tree lock
4619          */
4620         if (path->locks[level] && level > 0) {
4621                 btrfs_tree_unlock(eb);
4622                 path->locks[level] = 0;
4623         }
4624         return 0;
4625 }
4626
4627 /*
4628  * helper to process a tree block pointer.
4629  *
4630  * when wc->stage == DROP_REFERENCE, this function checks
4631  * reference count of the block pointed to. if the block
4632  * is shared and we need to update back refs for the subtree
4633  * rooted at the block, this function changes wc->stage to
4634  * UPDATE_BACKREF. if the block is shared and there is no
4635  * need to update back refs, this function drops the reference
4636  * to the block.
4637  *
4638  * NOTE: return value 1 means we should stop walking down.
4639  */
4640 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4641                                  struct btrfs_root *root,
4642                                  struct btrfs_path *path,
4643                                  struct walk_control *wc)
4644 {
4645         u64 bytenr;
4646         u64 generation;
4647         u64 parent;
4648         u32 blocksize;
4649         struct btrfs_key key;
4650         struct extent_buffer *next;
4651         int level = wc->level;
4652         int reada = 0;
4653         int ret = 0;
4654
4655         generation = btrfs_node_ptr_generation(path->nodes[level],
4656                                                path->slots[level]);
4657         /*
4658          * if the lower level block was created before the snapshot
4659          * was created, we know there is no need to update back refs
4660          * for the subtree
4661          */
4662         if (wc->stage == UPDATE_BACKREF &&
4663             generation <= root->root_key.offset)
4664                 return 1;
4665
4666         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
4667         blocksize = btrfs_level_size(root, level - 1);
4668
4669         next = btrfs_find_tree_block(root, bytenr, blocksize);
4670         if (!next) {
4671                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
4672                 reada = 1;
4673         }
4674         btrfs_tree_lock(next);
4675         btrfs_set_lock_blocking(next);
4676
4677         if (wc->stage == DROP_REFERENCE) {
4678                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
4679                                                &wc->refs[level - 1],
4680                                                &wc->flags[level - 1]);
4681                 BUG_ON(ret);
4682                 BUG_ON(wc->refs[level - 1] == 0);
4683
4684                 if (wc->refs[level - 1] > 1) {
4685                         if (!wc->update_ref ||
4686                             generation <= root->root_key.offset)
4687                                 goto skip;
4688
4689                         btrfs_node_key_to_cpu(path->nodes[level], &key,
4690                                               path->slots[level]);
4691                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
4692                         if (ret < 0)
4693                                 goto skip;
4694
4695                         wc->stage = UPDATE_BACKREF;
4696                         wc->shared_level = level - 1;
4697                 }
4698         }
4699
4700         if (!btrfs_buffer_uptodate(next, generation)) {
4701                 btrfs_tree_unlock(next);
4702                 free_extent_buffer(next);
4703                 next = NULL;
4704         }
4705
4706         if (!next) {
4707                 if (reada && level == 1)
4708                         reada_walk_down(trans, root, wc, path);
4709                 next = read_tree_block(root, bytenr, blocksize, generation);
4710                 btrfs_tree_lock(next);
4711                 btrfs_set_lock_blocking(next);
4712         }
4713
4714         level--;
4715         BUG_ON(level != btrfs_header_level(next));
4716         path->nodes[level] = next;
4717         path->slots[level] = 0;
4718         path->locks[level] = 1;
4719         wc->level = level;
4720         if (wc->level == 1)
4721                 wc->reada_slot = 0;
4722         return 0;
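        /*
         * skip: the child block is shared and its backrefs do not need to
         * be updated, so drop our single reference on it here instead of
         * descending into the subtree.
         */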
4723 skip:
4724         wc->refs[level - 1] = 0;
4725         wc->flags[level - 1] = 0;
4726
4727         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
4728                 parent = path->nodes[level]->start;
4729         } else {
4730                 BUG_ON(root->root_key.objectid !=
4731                        btrfs_header_owner(path->nodes[level]));
4732                 parent = 0;
4733         }
4734
4735         ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
4736                                 root->root_key.objectid, level - 1, 0);
4737         BUG_ON(ret);
4738
4739         btrfs_tree_unlock(next);
4740         free_extent_buffer(next);
4741         return 1;
4742 }
4743
4744 /*
4745  * helper to process tree block while walking up the tree.
4746  *
4747  * when wc->stage == DROP_REFERENCE, this function drops
4748  * reference count on the block.
4749  *
4750  * when wc->stage == UPDATE_BACKREF, this function changes
4751  * wc->stage back to DROP_REFERENCE if we changed wc->stage
4752  * to UPDATE_BACKREF previously while processing the block.
4753  *
4754  * NOTE: return value 1 means we should stop walking up.
4755  */
4756 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4757                                  struct btrfs_root *root,
4758                                  struct btrfs_path *path,
4759                                  struct walk_control *wc)
4760 {
4761         int ret = 0;
4762         int level = wc->level;
4763         struct extent_buffer *eb = path->nodes[level];
4764         u64 parent = 0;
4765
4766         if (wc->stage == UPDATE_BACKREF) {
4767                 BUG_ON(wc->shared_level < level);
4768                 if (level < wc->shared_level)
4769                         goto out;
4770
4771                 ret = find_next_key(path, level + 1, &wc->update_progress);
4772                 if (ret > 0)
4773                         wc->update_ref = 0;
4774
4775                 wc->stage = DROP_REFERENCE;
4776                 wc->shared_level = -1;
4777                 path->slots[level] = 0;
4778
4779                 /*
4780                  * check reference count again if the block isn't locked.
4781                  * we should start walking down the tree again if reference
4782                  * count is one.
4783                  */
4784                 if (!path->locks[level]) {
4785                         BUG_ON(level == 0);
4786                         btrfs_tree_lock(eb);
4787                         btrfs_set_lock_blocking(eb);
4788                         path->locks[level] = 1;
4789
4790                         ret = btrfs_lookup_extent_info(trans, root,
4791                                                        eb->start, eb->len,
4792                                                        &wc->refs[level],
4793                                                        &wc->flags[level]);
4794                         BUG_ON(ret);
4795                         BUG_ON(wc->refs[level] == 0);
4796                         if (wc->refs[level] == 1) {
4797                                 btrfs_tree_unlock(eb);
4798                                 path->locks[level] = 0;
4799                                 return 1;
4800                         }
4801                 }
4802         }
4803
4804         /* wc->stage == DROP_REFERENCE */
4805         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
4806
4807         if (wc->refs[level] == 1) {
4808                 if (level == 0) {
4809                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4810                                 ret = btrfs_dec_ref(trans, root, eb, 1);
4811                         else
4812                                 ret = btrfs_dec_ref(trans, root, eb, 0);
4813                         BUG_ON(ret);
4814                 }
4815                 /* make the block-locked assertion in clean_tree_block happy */
4816                 if (!path->locks[level] &&
4817                     btrfs_header_generation(eb) == trans->transid) {
4818                         btrfs_tree_lock(eb);
4819                         btrfs_set_lock_blocking(eb);
4820                         path->locks[level] = 1;
4821                 }
4822                 clean_tree_block(trans, root, eb);
4823         }
4824
4825         if (eb == root->node) {
4826                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4827                         parent = eb->start;
4828                 else
4829                         BUG_ON(root->root_key.objectid !=
4830                                btrfs_header_owner(eb));
4831         } else {
4832                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4833                         parent = path->nodes[level + 1]->start;
4834                 else
4835                         BUG_ON(root->root_key.objectid !=
4836                                btrfs_header_owner(path->nodes[level + 1]));
4837         }
4838
4839         ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
4840                                 root->root_key.objectid, level, 0);
4841         BUG_ON(ret);
4842 out:
4843         wc->refs[level] = 0;
4844         wc->flags[level] = 0;
4845         return ret;
4846 }
4847
4848 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4849                                    struct btrfs_root *root,
4850                                    struct btrfs_path *path,
4851                                    struct walk_control *wc)
4852 {
4853         int level = wc->level;
4854         int ret;
4855
4856         while (level >= 0) {
4857                 if (path->slots[level] >=
4858                     btrfs_header_nritems(path->nodes[level]))
4859                         break;
4860
4861                 ret = walk_down_proc(trans, root, path, wc);
4862                 if (ret > 0)
4863                         break;
4864
4865                 if (level == 0)
4866                         break;
4867
4868                 ret = do_walk_down(trans, root, path, wc);
4869                 if (ret > 0) {
4870                         path->slots[level]++;
4871                         continue;
4872                 }
4873                 level = wc->level;
4874         }
4875         return 0;
4876 }
4877
4878 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
4879                                  struct btrfs_root *root,
4880                                  struct btrfs_path *path,
4881                                  struct walk_control *wc, int max_level)
4882 {
4883         int level = wc->level;
4884         int ret;
4885
4886         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
4887         while (level < max_level && path->nodes[level]) {
4888                 wc->level = level;
4889                 if (path->slots[level] + 1 <
4890                     btrfs_header_nritems(path->nodes[level])) {
4891                         path->slots[level]++;
4892                         return 0;
4893                 } else {
4894                         ret = walk_up_proc(trans, root, path, wc);
4895                         if (ret > 0)
4896                                 return 0;
4897
4898                         if (path->locks[level]) {
4899                                 btrfs_tree_unlock(path->nodes[level]);
4900                                 path->locks[level] = 0;
4901                         }
4902                         free_extent_buffer(path->nodes[level]);
4903                         path->nodes[level] = NULL;
4904                         level++;
4905                 }
4906         }
4907         return 1;
4908 }
4909
4910 /*
4911  * drop a subvolume tree.
4912  *
4913  * this function traverses the tree freeing any blocks that are only
4914  * referenced by the tree.
4915  *
4916  * when a shared tree block is found, this function decreases its
4917  * reference count by one. if update_ref is true, this function
4918  * also makes sure backrefs for the shared block and all lower level
4919  * blocks are properly updated.
4920  */
4921 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
4922 {
4923         struct btrfs_path *path;
4924         struct btrfs_trans_handle *trans;
4925         struct btrfs_root *tree_root = root->fs_info->tree_root;
4926         struct btrfs_root_item *root_item = &root->root_item;
4927         struct walk_control *wc;
4928         struct btrfs_key key;
4929         int err = 0;
4930         int ret;
4931         int level;
4932
4933         path = btrfs_alloc_path();
4934         BUG_ON(!path);
4935
4936         wc = kzalloc(sizeof(*wc), GFP_NOFS);
4937         BUG_ON(!wc);
4938
4939         trans = btrfs_start_transaction(tree_root, 1);
4940
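        /*
         * drop_progress in the root item records how far a previous,
         * interrupted drop got.  a zero objectid means we start from the
         * tree root, otherwise re-search down to the saved key at
         * drop_level and resume from there.
         */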
4941         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
4942                 level = btrfs_header_level(root->node);
4943                 path->nodes[level] = btrfs_lock_root_node(root);
4944                 btrfs_set_lock_blocking(path->nodes[level]);
4945                 path->slots[level] = 0;
4946                 path->locks[level] = 1;
4947                 memset(&wc->update_progress, 0,
4948                        sizeof(wc->update_progress));
4949         } else {
4950                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
4951                 memcpy(&wc->update_progress, &key,
4952                        sizeof(wc->update_progress));
4953
4954                 level = root_item->drop_level;
4955                 BUG_ON(level == 0);
4956                 path->lowest_level = level;
4957                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4958                 path->lowest_level = 0;
4959                 if (ret < 0) {
4960                         err = ret;
4961                         goto out;
4962                 }
4963                 WARN_ON(ret > 0);
4964
4965                 /*
4966                  * unlock our path, this is safe because only this
4967                  * function is allowed to delete this snapshot
4968                  */
4969                 btrfs_unlock_up_safe(path, 0);
4970
4971                 level = btrfs_header_level(root->node);
4972                 while (1) {
4973                         btrfs_tree_lock(path->nodes[level]);
4974                         btrfs_set_lock_blocking(path->nodes[level]);
4975
4976                         ret = btrfs_lookup_extent_info(trans, root,
4977                                                 path->nodes[level]->start,
4978                                                 path->nodes[level]->len,
4979                                                 &wc->refs[level],
4980                                                 &wc->flags[level]);
4981                         BUG_ON(ret);
4982                         BUG_ON(wc->refs[level] == 0);
4983
4984                         if (level == root_item->drop_level)
4985                                 break;
4986
4987                         btrfs_tree_unlock(path->nodes[level]);
4988                         WARN_ON(wc->refs[level] != 1);
4989                         level--;
4990                 }
4991         }
4992
4993         wc->level = level;
4994         wc->shared_level = -1;
4995         wc->stage = DROP_REFERENCE;
4996         wc->update_ref = update_ref;
4997         wc->keep_locks = 0;
4998         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
4999
5000         while (1) {
5001                 ret = walk_down_tree(trans, root, path, wc);
5002                 if (ret < 0) {
5003                         err = ret;
5004                         break;
5005                 }
5006
5007                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5008                 if (ret < 0) {
5009                         err = ret;
5010                         break;
5011                 }
5012
5013                 if (ret > 0) {
5014                         BUG_ON(wc->stage != DROP_REFERENCE);
5015                         break;
5016                 }
5017
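                /*
                 * remember how far we got so the drop can resume from here
                 * if the transaction has to be restarted (or we crash)
                 * before the whole tree is gone.
                 */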
5018                 if (wc->stage == DROP_REFERENCE) {
5019                         level = wc->level;
5020                         btrfs_node_key(path->nodes[level],
5021                                        &root_item->drop_progress,
5022                                        path->slots[level]);
5023                         root_item->drop_level = level;
5024                 }
5025
5026                 BUG_ON(wc->level == 0);
5027                 if (trans->transaction->in_commit ||
5028                     trans->transaction->delayed_refs.flushing) {
5029                         ret = btrfs_update_root(trans, tree_root,
5030                                                 &root->root_key,
5031                                                 root_item);
5032                         BUG_ON(ret);
5033
5034                         btrfs_end_transaction(trans, tree_root);
5035                         trans = btrfs_start_transaction(tree_root, 1);
5036                 } else {
5037                         unsigned long update;
5038                         update = trans->delayed_ref_updates;
5039                         trans->delayed_ref_updates = 0;
5040                         if (update)
5041                                 btrfs_run_delayed_refs(trans, tree_root,
5042                                                        update);
5043                 }
5044         }
5045         btrfs_release_path(root, path);
5046         BUG_ON(err);
5047
5048         ret = btrfs_del_root(trans, tree_root, &root->root_key);
5049         BUG_ON(ret);
5050
5051         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5052                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
5053                                            NULL, NULL);
5054                 BUG_ON(ret < 0);
5055                 if (ret > 0) {
5056                         ret = btrfs_del_orphan_item(trans, tree_root,
5057                                                     root->root_key.objectid);
5058                         BUG_ON(ret);
5059                 }
5060         }
5061
5062         if (root->in_radix) {
5063                 btrfs_free_fs_root(tree_root->fs_info, root);
5064         } else {
5065                 free_extent_buffer(root->node);
5066                 free_extent_buffer(root->commit_root);
5067                 kfree(root);
5068         }
5069 out:
5070         btrfs_end_transaction(trans, tree_root);
5071         kfree(wc);
5072         btrfs_free_path(path);
5073         return err;
5074 }
5075
5076 /*
5077  * drop subtree rooted at tree block 'node'.
5078  *
5079  * NOTE: this function will unlock and release tree block 'node'
5080  */
5081 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5082                         struct btrfs_root *root,
5083                         struct extent_buffer *node,
5084                         struct extent_buffer *parent)
5085 {
5086         struct btrfs_path *path;
5087         struct walk_control *wc;
5088         int level;
5089         int parent_level;
5090         int ret = 0;
5091         int wret;
5092
5093         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5094
5095         path = btrfs_alloc_path();
5096         BUG_ON(!path);
5097
5098         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5099         BUG_ON(!wc);
5100
5101         btrfs_assert_tree_locked(parent);
5102         parent_level = btrfs_header_level(parent);
5103         extent_buffer_get(parent);
5104         path->nodes[parent_level] = parent;
5105         path->slots[parent_level] = btrfs_header_nritems(parent);
5106
5107         btrfs_assert_tree_locked(node);
5108         level = btrfs_header_level(node);
5109         path->nodes[level] = node;
5110         path->slots[level] = 0;
5111         path->locks[level] = 1;
5112
5113         wc->refs[parent_level] = 1;
5114         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5115         wc->level = level;
5116         wc->shared_level = -1;
5117         wc->stage = DROP_REFERENCE;
5118         wc->update_ref = 0;
5119         wc->keep_locks = 1;
5120         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5121
5122         while (1) {
5123                 wret = walk_down_tree(trans, root, path, wc);
5124                 if (wret < 0) {
5125                         ret = wret;
5126                         break;
5127                 }
5128
5129                 wret = walk_up_tree(trans, root, path, wc, parent_level);
5130                 if (wret < 0)
5131                         ret = wret;
5132                 if (wret != 0)
5133                         break;
5134         }
5135
5136         kfree(wc);
5137         btrfs_free_path(path);
5138         return ret;
5139 }
5140
5141 #if 0
5142 static unsigned long calc_ra(unsigned long start, unsigned long last,
5143                              unsigned long nr)
5144 {
5145         return min(last, start + nr - 1);
5146 }
5147
5148 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5149                                          u64 len)
5150 {
5151         u64 page_start;
5152         u64 page_end;
5153         unsigned long first_index;
5154         unsigned long last_index;
5155         unsigned long i;
5156         struct page *page;
5157         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5158         struct file_ra_state *ra;
5159         struct btrfs_ordered_extent *ordered;
5160         unsigned int total_read = 0;
5161         unsigned int total_dirty = 0;
5162         int ret = 0;
5163
5164         ra = kzalloc(sizeof(*ra), GFP_NOFS);
5165
5166         mutex_lock(&inode->i_mutex);
5167         first_index = start >> PAGE_CACHE_SHIFT;
5168         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5169
5170         /* make sure the dirty trick played by the caller works */
5171         ret = invalidate_inode_pages2_range(inode->i_mapping,
5172                                             first_index, last_index);
5173         if (ret)
5174                 goto out_unlock;
5175
5176         file_ra_state_init(ra, inode->i_mapping);
5177
5178         for (i = first_index ; i <= last_index; i++) {
5179                 if (total_read % ra->ra_pages == 0) {
5180                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5181                                        calc_ra(i, last_index, ra->ra_pages));
5182                 }
5183                 total_read++;
5184 again:
5185                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
5186                         BUG_ON(1);
5187                 page = grab_cache_page(inode->i_mapping, i);
5188                 if (!page) {
5189                         ret = -ENOMEM;
5190                         goto out_unlock;
5191                 }
5192                 if (!PageUptodate(page)) {
5193                         btrfs_readpage(NULL, page);
5194                         lock_page(page);
5195                         if (!PageUptodate(page)) {
5196                                 unlock_page(page);
5197                                 page_cache_release(page);
5198                                 ret = -EIO;
5199                                 goto out_unlock;
5200                         }
5201                 }
5202                 wait_on_page_writeback(page);
5203
5204                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5205                 page_end = page_start + PAGE_CACHE_SIZE - 1;
5206                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5207
5208                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5209                 if (ordered) {
5210                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5211                         unlock_page(page);
5212                         page_cache_release(page);
5213                         btrfs_start_ordered_extent(inode, ordered, 1);
5214                         btrfs_put_ordered_extent(ordered);
5215                         goto again;
5216                 }
5217                 set_page_extent_mapped(page);
5218
5219                 if (i == first_index)
5220                         set_extent_bits(io_tree, page_start, page_end,
5221                                         EXTENT_BOUNDARY, GFP_NOFS);
5222                 btrfs_set_extent_delalloc(inode, page_start, page_end);
5223
5224                 set_page_dirty(page);
5225                 total_dirty++;
5226
5227                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5228                 unlock_page(page);
5229                 page_cache_release(page);
5230         }
5231
5232 out_unlock:
5233         kfree(ra);
5234         mutex_unlock(&inode->i_mutex);
5235         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5236         return ret;
5237 }
5238
5239 static noinline int relocate_data_extent(struct inode *reloc_inode,
5240                                          struct btrfs_key *extent_key,
5241                                          u64 offset)
5242 {
5243         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5244         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5245         struct extent_map *em;
5246         u64 start = extent_key->objectid - offset;
5247         u64 end = start + extent_key->offset - 1;
5248
5249         em = alloc_extent_map(GFP_NOFS);
5250         BUG_ON(!em || IS_ERR(em));
5251
5252         em->start = start;
5253         em->len = extent_key->offset;
5254         em->block_len = extent_key->offset;
5255         em->block_start = extent_key->objectid;
5256         em->bdev = root->fs_info->fs_devices->latest_bdev;
5257         set_bit(EXTENT_FLAG_PINNED, &em->flags);
5258
5259         /* setup extent map to cheat btrfs_readpage */
5260         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5261         while (1) {
5262                 int ret;
5263                 write_lock(&em_tree->lock);
5264                 ret = add_extent_mapping(em_tree, em);
5265                 write_unlock(&em_tree->lock);
5266                 if (ret != -EEXIST) {
5267                         free_extent_map(em);
5268                         break;
5269                 }
5270                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5271         }
5272         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5273
5274         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
5275 }
5276
5277 struct btrfs_ref_path {
5278         u64 extent_start;
5279         u64 nodes[BTRFS_MAX_LEVEL];
5280         u64 root_objectid;
5281         u64 root_generation;
5282         u64 owner_objectid;
5283         u32 num_refs;
5284         int lowest_level;
5285         int current_level;
5286         int shared_level;
5287
5288         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5289         u64 new_nodes[BTRFS_MAX_LEVEL];
5290 };
5291
5292 struct disk_extent {
5293         u64 ram_bytes;
5294         u64 disk_bytenr;
5295         u64 disk_num_bytes;
5296         u64 offset;
5297         u64 num_bytes;
5298         u8 compression;
5299         u8 encryption;
5300         u16 other_encoding;
5301 };
5302
5303 static int is_cowonly_root(u64 root_objectid)
5304 {
5305         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5306             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5307             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5308             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5309             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5310             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
5311                 return 1;
5312         return 0;
5313 }
5314
5315 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5316                                     struct btrfs_root *extent_root,
5317                                     struct btrfs_ref_path *ref_path,
5318                                     int first_time)
5319 {
5320         struct extent_buffer *leaf;
5321         struct btrfs_path *path;
5322         struct btrfs_extent_ref *ref;
5323         struct btrfs_key key;
5324         struct btrfs_key found_key;
5325         u64 bytenr;
5326         u32 nritems;
5327         int level;
5328         int ret = 1;
5329
5330         path = btrfs_alloc_path();
5331         if (!path)
5332                 return -ENOMEM;
5333
5334         if (first_time) {
5335                 ref_path->lowest_level = -1;
5336                 ref_path->current_level = -1;
5337                 ref_path->shared_level = -1;
5338                 goto walk_up;
5339         }
5340 walk_down:
5341         level = ref_path->current_level - 1;
5342         while (level >= -1) {
5343                 u64 parent;
5344                 if (level < ref_path->lowest_level)
5345                         break;
5346
5347                 if (level >= 0)
5348                         bytenr = ref_path->nodes[level];
5349                 else
5350                         bytenr = ref_path->extent_start;
5351                 BUG_ON(bytenr == 0);
5352
5353                 parent = ref_path->nodes[level + 1];
5354                 ref_path->nodes[level + 1] = 0;
5355                 ref_path->current_level = level;
5356                 BUG_ON(parent == 0);
5357
5358                 key.objectid = bytenr;
5359                 key.offset = parent + 1;
5360                 key.type = BTRFS_EXTENT_REF_KEY;
5361
5362                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5363                 if (ret < 0)
5364                         goto out;
5365                 BUG_ON(ret == 0);
5366
5367                 leaf = path->nodes[0];
5368                 nritems = btrfs_header_nritems(leaf);
5369                 if (path->slots[0] >= nritems) {
5370                         ret = btrfs_next_leaf(extent_root, path);
5371                         if (ret < 0)
5372                                 goto out;
5373                         if (ret > 0)
5374                                 goto next;
5375                         leaf = path->nodes[0];
5376                 }
5377
5378                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5379                 if (found_key.objectid == bytenr &&
5380                     found_key.type == BTRFS_EXTENT_REF_KEY) {
5381                         if (level < ref_path->shared_level)
5382                                 ref_path->shared_level = level;
5383                         goto found;
5384                 }
5385 next:
5386                 level--;
5387                 btrfs_release_path(extent_root, path);
5388                 cond_resched();
5389         }
5390         /* reached lowest level */
5391         ret = 1;
5392         goto out;
5393 walk_up:
5394         level = ref_path->current_level;
5395         while (level < BTRFS_MAX_LEVEL - 1) {
5396                 u64 ref_objectid;
5397
5398                 if (level >= 0)
5399                         bytenr = ref_path->nodes[level];
5400                 else
5401                         bytenr = ref_path->extent_start;
5402
5403                 BUG_ON(bytenr == 0);
5404
5405                 key.objectid = bytenr;
5406                 key.offset = 0;
5407                 key.type = BTRFS_EXTENT_REF_KEY;
5408
5409                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5410                 if (ret < 0)
5411                         goto out;
5412
5413                 leaf = path->nodes[0];
5414                 nritems = btrfs_header_nritems(leaf);
5415                 if (path->slots[0] >= nritems) {
5416                         ret = btrfs_next_leaf(extent_root, path);
5417                         if (ret < 0)
5418                                 goto out;
5419                         if (ret > 0) {
5420                                 /* the extent was freed by someone */
5421                                 if (ref_path->lowest_level == level)
5422                                         goto out;
5423                                 btrfs_release_path(extent_root, path);
5424                                 goto walk_down;
5425                         }
5426                         leaf = path->nodes[0];
5427                 }
5428
5429                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5430                 if (found_key.objectid != bytenr ||
5431                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
5432                         /* the extent was freed by someone */
5433                         if (ref_path->lowest_level == level) {
5434                                 ret = 1;
5435                                 goto out;
5436                         }
5437                         btrfs_release_path(extent_root, path);
5438                         goto walk_down;
5439                 }
5440 found:
5441                 ref = btrfs_item_ptr(leaf, path->slots[0],
5442                                 struct btrfs_extent_ref);
5443                 ref_objectid = btrfs_ref_objectid(leaf, ref);
5444                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5445                         if (first_time) {
5446                                 level = (int)ref_objectid;
5447                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
5448                                 ref_path->lowest_level = level;
5449                                 ref_path->current_level = level;
5450                                 ref_path->nodes[level] = bytenr;
5451                         } else {
5452                                 WARN_ON(ref_objectid != level);
5453                         }
5454                 } else {
5455                         WARN_ON(level != -1);
5456                 }
5457                 first_time = 0;
5458
5459                 if (ref_path->lowest_level == level) {
5460                         ref_path->owner_objectid = ref_objectid;
5461                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
5462                 }
5463
5464                 /*
5465                  * the block is a tree root or the block isn't in a
5466                  * reference counted tree.
5467                  */
5468                 if (found_key.objectid == found_key.offset ||
5469                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
5470                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5471                         ref_path->root_generation =
5472                                 btrfs_ref_generation(leaf, ref);
5473                         if (level < 0) {
5474                                 /* special reference from the tree log */
5475                                 ref_path->nodes[0] = found_key.offset;
5476                                 ref_path->current_level = 0;
5477                         }
5478                         ret = 0;
5479                         goto out;
5480                 }
5481
5482                 level++;
5483                 BUG_ON(ref_path->nodes[level] != 0);
5484                 ref_path->nodes[level] = found_key.offset;
5485                 ref_path->current_level = level;
5486
5487                 /*
5488                  * the reference was created in the running transaction,
5489                  * no need to continue walking up.
5490                  */
5491                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
5492                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5493                         ref_path->root_generation =
5494                                 btrfs_ref_generation(leaf, ref);
5495                         ret = 0;
5496                         goto out;
5497                 }
5498
5499                 btrfs_release_path(extent_root, path);
5500                 cond_resched();
5501         }
5502         /* reached max tree level, but no tree root found. */
5503         BUG();
5504 out:
5505         btrfs_free_path(path);
5506         return ret;
5507 }
5508
5509 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
5510                                 struct btrfs_root *extent_root,
5511                                 struct btrfs_ref_path *ref_path,
5512                                 u64 extent_start)
5513 {
5514         memset(ref_path, 0, sizeof(*ref_path));
5515         ref_path->extent_start = extent_start;
5516
5517         return __next_ref_path(trans, extent_root, ref_path, 1);
5518 }
5519
5520 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
5521                                struct btrfs_root *extent_root,
5522                                struct btrfs_ref_path *ref_path)
5523 {
5524         return __next_ref_path(trans, extent_root, ref_path, 0);
5525 }
5526
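/*
 * Find the file extents in the relocation inode that cover the data extent
 * described by extent_key, starting at file offset extent_key->objectid -
 * offset.  The resulting disk_extent array is returned via *extents and
 * *nr_extents, and is grown as needed.  When no_fragment is set, the extent
 * must be covered by a single new extent; 1 is returned if it is not.
 */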
5527 static noinline int get_new_locations(struct inode *reloc_inode,
5528                                       struct btrfs_key *extent_key,
5529                                       u64 offset, int no_fragment,
5530                                       struct disk_extent **extents,
5531                                       int *nr_extents)
5532 {
5533         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5534         struct btrfs_path *path;
5535         struct btrfs_file_extent_item *fi;
5536         struct extent_buffer *leaf;
5537         struct disk_extent *exts = *extents;
5538         struct btrfs_key found_key;
5539         u64 cur_pos;
5540         u64 last_byte;
5541         u32 nritems;
5542         int nr = 0;
5543         int max = *nr_extents;
5544         int ret;
5545
5546         WARN_ON(!no_fragment && *extents);
5547         if (!exts) {
5548                 max = 1;
5549                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
5550                 if (!exts)
5551                         return -ENOMEM;
5552         }
5553
5554         path = btrfs_alloc_path();
5555         BUG_ON(!path);
5556
5557         cur_pos = extent_key->objectid - offset;
5558         last_byte = extent_key->objectid + extent_key->offset;
5559         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
5560                                        cur_pos, 0);
5561         if (ret < 0)
5562                 goto out;
5563         if (ret > 0) {
5564                 ret = -ENOENT;
5565                 goto out;
5566         }
5567
5568         while (1) {
5569                 leaf = path->nodes[0];
5570                 nritems = btrfs_header_nritems(leaf);
5571                 if (path->slots[0] >= nritems) {
5572                         ret = btrfs_next_leaf(root, path);
5573                         if (ret < 0)
5574                                 goto out;
5575                         if (ret > 0)
5576                                 break;
5577                         leaf = path->nodes[0];
5578                 }
5579
5580                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5581                 if (found_key.offset != cur_pos ||
5582                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
5583                     found_key.objectid != reloc_inode->i_ino)
5584                         break;
5585
5586                 fi = btrfs_item_ptr(leaf, path->slots[0],
5587                                     struct btrfs_file_extent_item);
5588                 if (btrfs_file_extent_type(leaf, fi) !=
5589                     BTRFS_FILE_EXTENT_REG ||
5590                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5591                         break;
5592
5593                 if (nr == max) {
5594                         struct disk_extent *old = exts;
5595                         max *= 2;
5596                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                             if (!exts) {
                                     exts = old;
                                     ret = -ENOMEM;
                                     goto out;
                             }
5597                         memcpy(exts, old, sizeof(*exts) * nr);
5598                         if (old != *extents)
5599                                 kfree(old);
5600                 }
5601
5602                 exts[nr].disk_bytenr =
5603                         btrfs_file_extent_disk_bytenr(leaf, fi);
5604                 exts[nr].disk_num_bytes =
5605                         btrfs_file_extent_disk_num_bytes(leaf, fi);
5606                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
5607                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5608                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
5609                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
5610                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
5611                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
5612                                                                            fi);
5613                 BUG_ON(exts[nr].offset > 0);
5614                 BUG_ON(exts[nr].compression || exts[nr].encryption);
5615                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
5616
5617                 cur_pos += exts[nr].num_bytes;
5618                 nr++;
5619
5620                 if (cur_pos + offset >= last_byte)
5621                         break;
5622
5623                 if (no_fragment) {
5624                         ret = 1;
5625                         goto out;
5626                 }
5627                 path->slots[0]++;
5628         }
5629
5630         BUG_ON(cur_pos + offset > last_byte);
5631         if (cur_pos + offset < last_byte) {
5632                 ret = -ENOENT;
5633                 goto out;
5634         }
5635         ret = 0;
5636 out:
5637         btrfs_free_path(path);
5638         if (ret) {
5639                 if (exts != *extents)
5640                         kfree(exts);
5641         } else {
5642                 *extents = exts;
5643                 *nr_extents = nr;
5644         }
5645         return ret;
5646 }
5647
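/*
 * Walk the file extent items in 'root' that reference the data extent being
 * relocated (extent_key) and switch them over to the new location(s) given
 * in new_extents.  The covered file range is locked in the inode's io_tree
 * while each pointer is updated, and the extent reference counts are
 * adjusted accordingly.
 */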
5648 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
5649                                         struct btrfs_root *root,
5650                                         struct btrfs_path *path,
5651                                         struct btrfs_key *extent_key,
5652                                         struct btrfs_key *leaf_key,
5653                                         struct btrfs_ref_path *ref_path,
5654                                         struct disk_extent *new_extents,
5655                                         int nr_extents)
5656 {
5657         struct extent_buffer *leaf;
5658         struct btrfs_file_extent_item *fi;
5659         struct inode *inode = NULL;
5660         struct btrfs_key key;
5661         u64 lock_start = 0;
5662         u64 lock_end = 0;
5663         u64 num_bytes;
5664         u64 ext_offset;
5665         u64 search_end = (u64)-1;
5666         u32 nritems;
5667         int nr_scanned = 0;
5668         int extent_locked = 0;
5669         int extent_type;
5670         int ret;
5671
5672         memcpy(&key, leaf_key, sizeof(key));
5673         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5674                 if (key.objectid < ref_path->owner_objectid ||
5675                     (key.objectid == ref_path->owner_objectid &&
5676                      key.type < BTRFS_EXTENT_DATA_KEY)) {
5677                         key.objectid = ref_path->owner_objectid;
5678                         key.type = BTRFS_EXTENT_DATA_KEY;
5679                         key.offset = 0;
5680                 }
5681         }
5682
5683         while (1) {
5684                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
5685                 if (ret < 0)
5686                         goto out;
5687
5688                 leaf = path->nodes[0];
5689                 nritems = btrfs_header_nritems(leaf);
5690 next:
5691                 if (extent_locked && ret > 0) {
5692                         /*
5693                          * the file extent item was modified by someone
5694                          * before the extent got locked.
5695                          */
5696                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5697                                       lock_end, GFP_NOFS);
5698                         extent_locked = 0;
5699                 }
5700
5701                 if (path->slots[0] >= nritems) {
5702                         if (++nr_scanned > 2)
5703                                 break;
5704
5705                         BUG_ON(extent_locked);
5706                         ret = btrfs_next_leaf(root, path);
5707                         if (ret < 0)
5708                                 goto out;
5709                         if (ret > 0)
5710                                 break;
5711                         leaf = path->nodes[0];
5712                         nritems = btrfs_header_nritems(leaf);
5713                 }
5714
5715                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5716
5717                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5718                         if ((key.objectid > ref_path->owner_objectid) ||
5719                             (key.objectid == ref_path->owner_objectid &&
5720                              key.type > BTRFS_EXTENT_DATA_KEY) ||
5721                             key.offset >= search_end)
5722                                 break;
5723                 }
5724
5725                 if (inode && key.objectid != inode->i_ino) {
5726                         BUG_ON(extent_locked);
5727                         btrfs_release_path(root, path);
5728                         mutex_unlock(&inode->i_mutex);
5729                         iput(inode);
5730                         inode = NULL;
5731                         continue;
5732                 }
5733
5734                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
5735                         path->slots[0]++;
5736                         ret = 1;
5737                         goto next;
5738                 }
5739                 fi = btrfs_item_ptr(leaf, path->slots[0],
5740                                     struct btrfs_file_extent_item);
5741                 extent_type = btrfs_file_extent_type(leaf, fi);
5742                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
5743                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
5744                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
5745                      extent_key->objectid)) {
5746                         path->slots[0]++;
5747                         ret = 1;
5748                         goto next;
5749                 }
5750
5751                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5752                 ext_offset = btrfs_file_extent_offset(leaf, fi);
5753
5754                 if (search_end == (u64)-1) {
5755                         search_end = key.offset - ext_offset +
5756                                 btrfs_file_extent_ram_bytes(leaf, fi);
5757                 }
5758
5759                 if (!extent_locked) {
5760                         lock_start = key.offset;
5761                         lock_end = lock_start + num_bytes - 1;
5762                 } else {
5763                         if (lock_start > key.offset ||
5764                             lock_end + 1 < key.offset + num_bytes) {
5765                                 unlock_extent(&BTRFS_I(inode)->io_tree,
5766                                               lock_start, lock_end, GFP_NOFS);
5767                                 extent_locked = 0;
5768                         }
5769                 }
5770
5771                 if (!inode) {
5772                         btrfs_release_path(root, path);
5773
5774                         inode = btrfs_iget_locked(root->fs_info->sb,
5775                                                   key.objectid, root);
5776                         if (inode->i_state & I_NEW) {
5777                                 BTRFS_I(inode)->root = root;
5778                                 BTRFS_I(inode)->location.objectid =
5779                                         key.objectid;
5780                                 BTRFS_I(inode)->location.type =
5781                                         BTRFS_INODE_ITEM_KEY;
5782                                 BTRFS_I(inode)->location.offset = 0;
5783                                 btrfs_read_locked_inode(inode);
5784                                 unlock_new_inode(inode);
5785                         }
5786                         /*
5787                          * some code calls btrfs_commit_transaction while
5788                          * holding the i_mutex, so we can't use mutex_lock
5789                          * here.
5790                          */
5791                         if (is_bad_inode(inode) ||
5792                             !mutex_trylock(&inode->i_mutex)) {
5793                                 iput(inode);
5794                                 inode = NULL;
5795                                 key.offset = (u64)-1;
5796                                 goto skip;
5797                         }
5798                 }
5799
5800                 if (!extent_locked) {
5801                         struct btrfs_ordered_extent *ordered;
5802
5803                         btrfs_release_path(root, path);
5804
5805                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5806                                     lock_end, GFP_NOFS);
5807                         ordered = btrfs_lookup_first_ordered_extent(inode,
5808                                                                     lock_end);
5809                         if (ordered &&
5810                             ordered->file_offset <= lock_end &&
5811                             ordered->file_offset + ordered->len > lock_start) {
5812                                 unlock_extent(&BTRFS_I(inode)->io_tree,
5813                                               lock_start, lock_end, GFP_NOFS);
5814                                 btrfs_start_ordered_extent(inode, ordered, 1);
5815                                 btrfs_put_ordered_extent(ordered);
5816                                 key.offset += num_bytes;
5817                                 goto skip;
5818                         }
5819                         if (ordered)
5820                                 btrfs_put_ordered_extent(ordered);
5821
5822                         extent_locked = 1;
5823                         continue;
5824                 }
5825
5826                 if (nr_extents == 1) {
5827                         /* update extent pointer in place */
5828                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
5829                                                 new_extents[0].disk_bytenr);
5830                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5831                                                 new_extents[0].disk_num_bytes);
5832                         btrfs_mark_buffer_dirty(leaf);
5833
5834                         btrfs_drop_extent_cache(inode, key.offset,
5835                                                 key.offset + num_bytes - 1, 0);
5836
5837                         ret = btrfs_inc_extent_ref(trans, root,
5838                                                 new_extents[0].disk_bytenr,
5839                                                 new_extents[0].disk_num_bytes,
5840                                                 leaf->start,
5841                                                 root->root_key.objectid,
5842                                                 trans->transid,
5843                                                 key.objectid);
5844                         BUG_ON(ret);
5845
5846                         ret = btrfs_free_extent(trans, root,
5847                                                 extent_key->objectid,
5848                                                 extent_key->offset,
5849                                                 leaf->start,
5850                                                 btrfs_header_owner(leaf),
5851                                                 btrfs_header_generation(leaf),
5852                                                 key.objectid, 0);
5853                         BUG_ON(ret);
5854
5855                         btrfs_release_path(root, path);
5856                         key.offset += num_bytes;
5857                 } else {
5858                         BUG_ON(1);
5859 #if 0
5860                         u64 alloc_hint;
5861                         u64 extent_len;
5862                         int i;
5863                         /*
5864                          * drop the old extent pointer first, then insert the
5865                          * new pointers one by one
5866                          */
5867                         btrfs_release_path(root, path);
5868                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
5869                                                  key.offset + num_bytes,
5870                                                  key.offset, &alloc_hint);
5871                         BUG_ON(ret);
5872
5873                         for (i = 0; i < nr_extents; i++) {
5874                                 if (ext_offset >= new_extents[i].num_bytes) {
5875                                         ext_offset -= new_extents[i].num_bytes;
5876                                         continue;
5877                                 }
5878                                 extent_len = min(new_extents[i].num_bytes -
5879                                                  ext_offset, num_bytes);
5880
5881                                 ret = btrfs_insert_empty_item(trans, root,
5882                                                               path, &key,
5883                                                               sizeof(*fi));
5884                                 BUG_ON(ret);
5885
5886                                 leaf = path->nodes[0];
5887                                 fi = btrfs_item_ptr(leaf, path->slots[0],
5888                                                 struct btrfs_file_extent_item);
5889                                 btrfs_set_file_extent_generation(leaf, fi,
5890                                                         trans->transid);
5891                                 btrfs_set_file_extent_type(leaf, fi,
5892                                                         BTRFS_FILE_EXTENT_REG);
5893                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
5894                                                 new_extents[i].disk_bytenr);
5895                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5896                                                 new_extents[i].disk_num_bytes);
5897                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
5898                                                 new_extents[i].ram_bytes);
5899
5900                                 btrfs_set_file_extent_compression(leaf, fi,
5901                                                 new_extents[i].compression);
5902                                 btrfs_set_file_extent_encryption(leaf, fi,
5903                                                 new_extents[i].encryption);
5904                                 btrfs_set_file_extent_other_encoding(leaf, fi,
5905                                                 new_extents[i].other_encoding);
5906
5907                                 btrfs_set_file_extent_num_bytes(leaf, fi,
5908                                                         extent_len);
5909                                 ext_offset += new_extents[i].offset;
5910                                 btrfs_set_file_extent_offset(leaf, fi,
5911                                                         ext_offset);
5912                                 btrfs_mark_buffer_dirty(leaf);
5913
5914                                 btrfs_drop_extent_cache(inode, key.offset,
5915                                                 key.offset + extent_len - 1, 0);
5916
5917                                 ret = btrfs_inc_extent_ref(trans, root,
5918                                                 new_extents[i].disk_bytenr,
5919                                                 new_extents[i].disk_num_bytes,
5920                                                 leaf->start,
5921                                                 root->root_key.objectid,
5922                                                 trans->transid, key.objectid);
5923                                 BUG_ON(ret);
5924                                 btrfs_release_path(root, path);
5925
5926                                 inode_add_bytes(inode, extent_len);
5927
5928                                 ext_offset = 0;
5929                                 num_bytes -= extent_len;
5930                                 key.offset += extent_len;
5931
5932                                 if (num_bytes == 0)
5933                                         break;
5934                         }
5935                         BUG_ON(i >= nr_extents);
5936 #endif
5937                 }
5938
5939                 if (extent_locked) {
5940                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5941                                       lock_end, GFP_NOFS);
5942                         extent_locked = 0;
5943                 }
5944 skip:
5945                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
5946                     key.offset >= search_end)
5947                         break;
5948
5949                 cond_resched();
5950         }
5951         ret = 0;
5952 out:
5953         btrfs_release_path(root, path);
5954         if (inode) {
5955                 mutex_unlock(&inode->i_mutex);
5956                 if (extent_locked) {
5957                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5958                                       lock_end, GFP_NOFS);
5959                 }
5960                 iput(inode);
5961         }
5962         return ret;
5963 }
5964
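/*
 * For a leaf that has just been COWed into the reloc tree, duplicate the
 * leaf ref cache entry of the original block at 'orig_start' so the
 * reference cache matches the relocated block 'buf'.
 */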
5965 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
5966                                struct btrfs_root *root,
5967                                struct extent_buffer *buf, u64 orig_start)
5968 {
5969         int level;
5970         int ret;
5971
5972         BUG_ON(btrfs_header_generation(buf) != trans->transid);
5973         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5974
5975         level = btrfs_header_level(buf);
5976         if (level == 0) {
5977                 struct btrfs_leaf_ref *ref;
5978                 struct btrfs_leaf_ref *orig_ref;
5979
5980                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
5981                 if (!orig_ref)
5982                         return -ENOENT;
5983
5984                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
5985                 if (!ref) {
5986                         btrfs_free_leaf_ref(root, orig_ref);
5987                         return -ENOMEM;
5988                 }
5989
5990                 ref->nritems = orig_ref->nritems;
5991                 memcpy(ref->extents, orig_ref->extents,
5992                         sizeof(ref->extents[0]) * ref->nritems);
5993
5994                 btrfs_free_leaf_ref(root, orig_ref);
5995
5996                 ref->root_gen = trans->transid;
5997                 ref->bytenr = buf->start;
5998                 ref->owner = btrfs_header_owner(buf);
5999                 ref->generation = btrfs_header_generation(buf);
6000
6001                 ret = btrfs_add_leaf_ref(root, ref, 0);
6002                 WARN_ON(ret);
6003                 btrfs_free_leaf_ref(root, ref);
6004         }
6005         return 0;
6006 }
6007
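/*
 * Drop the cached extent mappings of every file extent referenced by 'leaf'
 * from the corresponding inodes in target_root, so later reads pick up the
 * relocated copies instead of stale cached locations.
 */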
6008 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6009                                         struct extent_buffer *leaf,
6010                                         struct btrfs_block_group_cache *group,
6011                                         struct btrfs_root *target_root)
6012 {
6013         struct btrfs_key key;
6014         struct inode *inode = NULL;
6015         struct btrfs_file_extent_item *fi;
6016         u64 num_bytes;
6017         u64 skip_objectid = 0;
6018         u32 nritems;
6019         u32 i;
6020
6021         nritems = btrfs_header_nritems(leaf);
6022         for (i = 0; i < nritems; i++) {
6023                 btrfs_item_key_to_cpu(leaf, &key, i);
6024                 if (key.objectid == skip_objectid ||
6025                     key.type != BTRFS_EXTENT_DATA_KEY)
6026                         continue;
6027                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6028                 if (btrfs_file_extent_type(leaf, fi) ==
6029                     BTRFS_FILE_EXTENT_INLINE)
6030                         continue;
6031                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6032                         continue;
6033                 if (!inode || inode->i_ino != key.objectid) {
6034                         iput(inode);
6035                         inode = btrfs_ilookup(target_root->fs_info->sb,
6036                                               key.objectid, target_root, 1);
6037                 }
6038                 if (!inode) {
6039                         skip_objectid = key.objectid;
6040                         continue;
6041                 }
6042                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6043
6044                 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6045                             key.offset + num_bytes - 1, GFP_NOFS);
6046                 btrfs_drop_extent_cache(inode, key.offset,
6047                                         key.offset + num_bytes - 1, 1);
6048                 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6049                               key.offset + num_bytes - 1, GFP_NOFS);
6050                 cond_resched();
6051         }
6052         iput(inode);
6053         return 0;
6054 }
6055
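/*
 * Rewrite the data extent pointers in a reloc tree leaf that fall inside the
 * block group being relocated, pointing them at the copies made through the
 * relocation inode.  The leaf ref cache entry and the extent reference
 * counts are updated to match.
 */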
6056 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6057                                         struct btrfs_root *root,
6058                                         struct extent_buffer *leaf,
6059                                         struct btrfs_block_group_cache *group,
6060                                         struct inode *reloc_inode)
6061 {
6062         struct btrfs_key key;
6063         struct btrfs_key extent_key;
6064         struct btrfs_file_extent_item *fi;
6065         struct btrfs_leaf_ref *ref;
6066         struct disk_extent *new_extent;
6067         u64 bytenr;
6068         u64 num_bytes;
6069         u32 nritems;
6070         u32 i;
6071         int ext_index;
6072         int nr_extent;
6073         int ret;
6074
6075         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6076         BUG_ON(!new_extent);
6077
6078         ref = btrfs_lookup_leaf_ref(root, leaf->start);
6079         BUG_ON(!ref);
6080
6081         ext_index = -1;
6082         nritems = btrfs_header_nritems(leaf);
6083         for (i = 0; i < nritems; i++) {
6084                 btrfs_item_key_to_cpu(leaf, &key, i);
6085                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6086                         continue;
6087                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6088                 if (btrfs_file_extent_type(leaf, fi) ==
6089                     BTRFS_FILE_EXTENT_INLINE)
6090                         continue;
6091                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6092                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6093                 if (bytenr == 0)
6094                         continue;
6095
6096                 ext_index++;
6097                 if (bytenr >= group->key.objectid + group->key.offset ||
6098                     bytenr + num_bytes <= group->key.objectid)
6099                         continue;
6100
6101                 extent_key.objectid = bytenr;
6102                 extent_key.offset = num_bytes;
6103                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6104                 nr_extent = 1;
6105                 ret = get_new_locations(reloc_inode, &extent_key,
6106                                         group->key.objectid, 1,
6107                                         &new_extent, &nr_extent);
6108                 if (ret > 0)
6109                         continue;
6110                 BUG_ON(ret < 0);
6111
6112                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6113                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6114                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6115                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6116
6117                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6118                                                 new_extent->disk_bytenr);
6119                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6120                                                 new_extent->disk_num_bytes);
6121                 btrfs_mark_buffer_dirty(leaf);
6122
6123                 ret = btrfs_inc_extent_ref(trans, root,
6124                                         new_extent->disk_bytenr,
6125                                         new_extent->disk_num_bytes,
6126                                         leaf->start,
6127                                         root->root_key.objectid,
6128                                         trans->transid, key.objectid);
6129                 BUG_ON(ret);
6130
6131                 ret = btrfs_free_extent(trans, root,
6132                                         bytenr, num_bytes, leaf->start,
6133                                         btrfs_header_owner(leaf),
6134                                         btrfs_header_generation(leaf),
6135                                         key.objectid, 0);
6136                 BUG_ON(ret);
6137                 cond_resched();
6138         }
6139         kfree(new_extent);
6140         BUG_ON(ext_index + 1 != ref->nritems);
6141         btrfs_free_leaf_ref(root, ref);
6142         return 0;
6143 }
6144
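/*
 * Detach the reloc tree from 'root' and queue it on the dead_reloc_roots
 * list.  The reloc root's item is brought up to date in the tree root so the
 * tree can be dropped later by btrfs_drop_dead_reloc_roots().
 */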
6145 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6146                           struct btrfs_root *root)
6147 {
6148         struct btrfs_root *reloc_root;
6149         int ret;
6150
6151         if (root->reloc_root) {
6152                 reloc_root = root->reloc_root;
6153                 root->reloc_root = NULL;
6154                 list_add(&reloc_root->dead_list,
6155                          &root->fs_info->dead_reloc_roots);
6156
6157                 btrfs_set_root_bytenr(&reloc_root->root_item,
6158                                       reloc_root->node->start);
6159                 btrfs_set_root_level(&reloc_root->root_item,
6160                                      btrfs_header_level(reloc_root->node));
6161                 memset(&reloc_root->root_item.drop_progress, 0,
6162                         sizeof(struct btrfs_disk_key));
6163                 reloc_root->root_item.drop_level = 0;
6164
6165                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6166                                         &reloc_root->root_key,
6167                                         &reloc_root->root_item);
6168                 BUG_ON(ret);
6169         }
6170         return 0;
6171 }
6172
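/*
 * Drop every reloc tree queued on dead_reloc_roots: repeatedly join a
 * transaction and call btrfs_drop_snapshot() until each tree is fully
 * removed, then delete its root item from the tree root.
 */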
6173 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6174 {
6175         struct btrfs_trans_handle *trans;
6176         struct btrfs_root *reloc_root;
6177         struct btrfs_root *prev_root = NULL;
6178         struct list_head dead_roots;
6179         int ret;
6180         unsigned long nr;
6181
6182         INIT_LIST_HEAD(&dead_roots);
6183         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6184
6185         while (!list_empty(&dead_roots)) {
6186                 reloc_root = list_entry(dead_roots.prev,
6187                                         struct btrfs_root, dead_list);
6188                 list_del_init(&reloc_root->dead_list);
6189
6190                 BUG_ON(reloc_root->commit_root != NULL);
6191                 while (1) {
6192                         trans = btrfs_join_transaction(root, 1);
6193                         BUG_ON(!trans);
6194
6195                         mutex_lock(&root->fs_info->drop_mutex);
6196                         ret = btrfs_drop_snapshot(trans, reloc_root);
6197                         if (ret != -EAGAIN)
6198                                 break;
6199                         mutex_unlock(&root->fs_info->drop_mutex);
6200
6201                         nr = trans->blocks_used;
6202                         ret = btrfs_end_transaction(trans, root);
6203                         BUG_ON(ret);
6204                         btrfs_btree_balance_dirty(root, nr);
6205                 }
6206
6207                 free_extent_buffer(reloc_root->node);
6208
6209                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6210                                      &reloc_root->root_key);
6211                 BUG_ON(ret);
6212                 mutex_unlock(&root->fs_info->drop_mutex);
6213
6214                 nr = trans->blocks_used;
6215                 ret = btrfs_end_transaction(trans, root);
6216                 BUG_ON(ret);
6217                 btrfs_btree_balance_dirty(root, nr);
6218
6219                 kfree(prev_root);
6220                 prev_root = reloc_root;
6221         }
6222         if (prev_root) {
6223                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6224                 kfree(prev_root);
6225         }
6226         return 0;
6227 }
6228
6229 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6230 {
6231         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6232         return 0;
6233 }
6234
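/*
 * Finish dropping any reloc trees still recorded in the root tree
 * (committing a transaction if some are found), then run orphan cleanup on
 * the data relocation tree.
 */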
6235 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6236 {
6237         struct btrfs_root *reloc_root;
6238         struct btrfs_trans_handle *trans;
6239         struct btrfs_key location;
6240         int found;
6241         int ret;
6242
6243         mutex_lock(&root->fs_info->tree_reloc_mutex);
6244         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6245         BUG_ON(ret);
6246         found = !list_empty(&root->fs_info->dead_reloc_roots);
6247         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6248
6249         if (found) {
6250                 trans = btrfs_start_transaction(root, 1);
6251                 BUG_ON(!trans);
6252                 ret = btrfs_commit_transaction(trans, root);
6253                 BUG_ON(ret);
6254         }
6255
6256         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6257         location.offset = (u64)-1;
6258         location.type = BTRFS_ROOT_ITEM_KEY;
6259
6260         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6261         BUG_ON(!reloc_root);
6262         btrfs_orphan_cleanup(reloc_root);
6263         return 0;
6264 }
6265
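/*
 * Create the reloc tree for 'root' if it does not exist yet: copy the last
 * committed root into a new tree under BTRFS_TREE_RELOC_OBJECTID, insert its
 * root item into the tree root and hook it up as root->reloc_root.
 */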
6266 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6267                                     struct btrfs_root *root)
6268 {
6269         struct btrfs_root *reloc_root;
6270         struct extent_buffer *eb;
6271         struct btrfs_root_item *root_item;
6272         struct btrfs_key root_key;
6273         int ret;
6274
6275         BUG_ON(!root->ref_cows);
6276         if (root->reloc_root)
6277                 return 0;
6278
6279         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6280         BUG_ON(!root_item);
6281
6282         ret = btrfs_copy_root(trans, root, root->commit_root,
6283                               &eb, BTRFS_TREE_RELOC_OBJECTID);
6284         BUG_ON(ret);
6285
6286         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6287         root_key.offset = root->root_key.objectid;
6288         root_key.type = BTRFS_ROOT_ITEM_KEY;
6289
6290         memcpy(root_item, &root->root_item, sizeof(*root_item));
6291         btrfs_set_root_refs(root_item, 0);
6292         btrfs_set_root_bytenr(root_item, eb->start);
6293         btrfs_set_root_level(root_item, btrfs_header_level(eb));
6294         btrfs_set_root_generation(root_item, trans->transid);
6295
6296         btrfs_tree_unlock(eb);
6297         free_extent_buffer(eb);
6298
6299         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6300                                 &root_key, root_item);
6301         BUG_ON(ret);
6302         kfree(root_item);
6303
6304         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6305                                                  &root_key);
6306         BUG_ON(!reloc_root);
6307         reloc_root->last_trans = trans->transid;
6308         reloc_root->commit_root = NULL;
6309         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6310
6311         root->reloc_root = reloc_root;
6312         return 0;
6313 }
6314
6315 /*
6316  * Core function of space balance.
6317  *
6318  * The idea is to use reloc trees to relocate tree blocks in reference
6319  * counted roots. There is one reloc tree for each subvol, and all reloc
6320  * trees share the same root key objectid. Reloc trees are snapshots of
6321  * the latest committed roots of subvols (root->commit_root).
6322  *
6323  * To relocate a tree block referenced by a subvol, there are two steps:
6324  * COW the block through the subvol's reloc tree, then update the block
6325  * pointer in the subvol to point to the new block. Since all reloc trees
6326  * share the same root key objectid, special handling for tree blocks
6327  * owned by them is easy. Once a tree block has been COWed in one reloc
6328  * tree, the resulting new block can be used directly when the same block
6329  * needs to be COWed again through another reloc tree. This way, relocated
6330  * tree blocks are shared between reloc trees, so they are also shared
6331  * between subvols.
6332  */
6333 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
6334                                       struct btrfs_root *root,
6335                                       struct btrfs_path *path,
6336                                       struct btrfs_key *first_key,
6337                                       struct btrfs_ref_path *ref_path,
6338                                       struct btrfs_block_group_cache *group,
6339                                       struct inode *reloc_inode)
6340 {
6341         struct btrfs_root *reloc_root;
6342         struct extent_buffer *eb = NULL;
6343         struct btrfs_key *keys;
6344         u64 *nodes;
6345         int level;
6346         int shared_level;
6347         int lowest_level = 0;
6348         int ret;
6349
6350         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6351                 lowest_level = ref_path->owner_objectid;
6352
6353         if (!root->ref_cows) {
6354                 path->lowest_level = lowest_level;
6355                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6356                 BUG_ON(ret < 0);
6357                 path->lowest_level = 0;
6358                 btrfs_release_path(root, path);
6359                 return 0;
6360         }
6361
6362         mutex_lock(&root->fs_info->tree_reloc_mutex);
6363         ret = init_reloc_tree(trans, root);
6364         BUG_ON(ret);
6365         reloc_root = root->reloc_root;
6366
6367         shared_level = ref_path->shared_level;
6368         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
6369
6370         keys = ref_path->node_keys;
6371         nodes = ref_path->new_nodes;
6372         memset(&keys[shared_level + 1], 0,
6373                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6374         memset(&nodes[shared_level + 1], 0,
6375                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
6376
6377         if (nodes[lowest_level] == 0) {
6378                 path->lowest_level = lowest_level;
6379                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6380                                         0, 1);
6381                 BUG_ON(ret);
6382                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6383                         eb = path->nodes[level];
6384                         if (!eb || eb == reloc_root->node)
6385                                 break;
6386                         nodes[level] = eb->start;
6387                         if (level == 0)
6388                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6389                         else
6390                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6391                 }
6392                 if (nodes[0] &&
6393                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6394                         eb = path->nodes[0];
6395                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
6396                                                       group, reloc_inode);
6397                         BUG_ON(ret);
6398                 }
6399                 btrfs_release_path(reloc_root, path);
6400         } else {
6401                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
6402                                        lowest_level);
6403                 BUG_ON(ret);
6404         }
6405
6406         /*
6407          * replace tree blocks in the fs tree with tree blocks in
6408          * the reloc tree.
6409          */
6410         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6411         BUG_ON(ret < 0);
6412
6413         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6414                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6415                                         0, 0);
6416                 BUG_ON(ret);
6417                 extent_buffer_get(path->nodes[0]);
6418                 eb = path->nodes[0];
6419                 btrfs_release_path(reloc_root, path);
6420                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6421                 BUG_ON(ret);
6422                 free_extent_buffer(eb);
6423         }
6424
6425         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6426         path->lowest_level = 0;
6427         return 0;
6428 }
6429
6430 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
6431                                         struct btrfs_root *root,
6432                                         struct btrfs_path *path,
6433                                         struct btrfs_key *first_key,
6434                                         struct btrfs_ref_path *ref_path)
6435 {
6436         int ret;
6437
6438         ret = relocate_one_path(trans, root, path, first_key,
6439                                 ref_path, NULL, NULL);
6440         BUG_ON(ret);
6441
6442         return 0;
6443 }
6444
6445 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
6446                                     struct btrfs_root *extent_root,
6447                                     struct btrfs_path *path,
6448                                     struct btrfs_key *extent_key)
6449 {
6450         int ret;
6451
6452         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
6453         if (ret)
6454                 goto out;
6455         ret = btrfs_del_item(trans, extent_root, path);
6456 out:
6457         btrfs_release_path(extent_root, path);
6458         return ret;
6459 }
6460
6461 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
6462                                                 struct btrfs_ref_path *ref_path)
6463 {
6464         struct btrfs_key root_key;
6465
6466         root_key.objectid = ref_path->root_objectid;
6467         root_key.type = BTRFS_ROOT_ITEM_KEY;
6468         if (is_cowonly_root(ref_path->root_objectid))
6469                 root_key.offset = 0;
6470         else
6471                 root_key.offset = (u64)-1;
6472
6473         return btrfs_read_fs_root_no_name(fs_info, &root_key);
6474 }
6475
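/*
 * Relocate all references to the extent described by extent_key out of the
 * block group being relocated.  Data extent references are handled by
 * copying the data into the relocation inode (pass 0) and then switching the
 * file extent pointers over; tree block references are handled through the
 * per-subvol reloc trees.
 */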
6476 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
6477                                         struct btrfs_path *path,
6478                                         struct btrfs_key *extent_key,
6479                                         struct btrfs_block_group_cache *group,
6480                                         struct inode *reloc_inode, int pass)
6481 {
6482         struct btrfs_trans_handle *trans;
6483         struct btrfs_root *found_root;
6484         struct btrfs_ref_path *ref_path = NULL;
6485         struct disk_extent *new_extents = NULL;
6486         int nr_extents = 0;
6487         int loops;
6488         int ret;
6489         int level;
6490         struct btrfs_key first_key;
6491         u64 prev_block = 0;
6492
6493
6494         trans = btrfs_start_transaction(extent_root, 1);
6495         BUG_ON(!trans);
6496
6497         if (extent_key->objectid == 0) {
6498                 ret = del_extent_zero(trans, extent_root, path, extent_key);
6499                 goto out;
6500         }
6501
6502         ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
6503         if (!ref_path) {
6504                 ret = -ENOMEM;
6505                 goto out;
6506         }
6507
6508         for (loops = 0; ; loops++) {
6509                 if (loops == 0) {
6510                         ret = btrfs_first_ref_path(trans, extent_root, ref_path,
6511                                                    extent_key->objectid);
6512                 } else {
6513                         ret = btrfs_next_ref_path(trans, extent_root, ref_path);
6514                 }
6515                 if (ret < 0)
6516                         goto out;
6517                 if (ret > 0)
6518                         break;
6519
6520                 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6521                     ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
6522                         continue;
6523
6524                 found_root = read_ref_root(extent_root->fs_info, ref_path);
6525                 BUG_ON(!found_root);
6526                 /*
6527                  * for a reference counted tree, only process reference paths
6528                  * rooted at the latest committed root.
6529                  */
6530                 if (found_root->ref_cows &&
6531                     ref_path->root_generation != found_root->root_key.offset)
6532                         continue;
6533
6534                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6535                         if (pass == 0) {
6536                                 /*
6537                                  * copy data extents to new locations
6538                                  */
6539                                 u64 group_start = group->key.objectid;
6540                                 ret = relocate_data_extent(reloc_inode,
6541                                                            extent_key,
6542                                                            group_start);
6543                                 if (ret < 0)
6544                                         goto out;
6545                                 break;
6546                         }
6547                         level = 0;
6548                 } else {
6549                         level = ref_path->owner_objectid;
6550                 }
6551
6552                 if (prev_block != ref_path->nodes[level]) {
6553                         struct extent_buffer *eb;
6554                         u64 block_start = ref_path->nodes[level];
6555                         u64 block_size = btrfs_level_size(found_root, level);
6556
6557                         eb = read_tree_block(found_root, block_start,
6558                                              block_size, 0);
6559                         btrfs_tree_lock(eb);
6560                         BUG_ON(level != btrfs_header_level(eb));
6561
6562                         if (level == 0)
6563                                 btrfs_item_key_to_cpu(eb, &first_key, 0);
6564                         else
6565                                 btrfs_node_key_to_cpu(eb, &first_key, 0);
6566
6567                         btrfs_tree_unlock(eb);
6568                         free_extent_buffer(eb);
6569                         prev_block = block_start;
6570                 }
6571
6572                 mutex_lock(&extent_root->fs_info->trans_mutex);
6573                 btrfs_record_root_in_trans(found_root);
6574                 mutex_unlock(&extent_root->fs_info->trans_mutex);
6575                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6576                         /*
6577                          * try to update data extent references while
6578                          * keeping metadata shared between snapshots.
6579                          */
6580                         if (pass == 1) {
6581                                 ret = relocate_one_path(trans, found_root,
6582                                                 path, &first_key, ref_path,
6583                                                 group, reloc_inode);
6584                                 if (ret < 0)
6585                                         goto out;
6586                                 continue;
6587                         }
6588                         /*
6589                          * use fallback method to process the remaining
6590                          * references.
6591                          */
6592                         if (!new_extents) {
6593                                 u64 group_start = group->key.objectid;
6594                                 new_extents = kmalloc(sizeof(*new_extents),
6595                                                       GFP_NOFS);
6596                                 nr_extents = 1;
6597                                 ret = get_new_locations(reloc_inode,
6598                                                         extent_key,
6599                                                         group_start, 1,
6600                                                         &new_extents,
6601                                                         &nr_extents);
6602                                 if (ret)
6603                                         goto out;
6604                         }
6605                         ret = replace_one_extent(trans, found_root,
6606                                                 path, extent_key,
6607                                                 &first_key, ref_path,
6608                                                 new_extents, nr_extents);
6609                 } else {
6610                         ret = relocate_tree_block(trans, found_root, path,
6611                                                   &first_key, ref_path);
6612                 }
6613                 if (ret < 0)
6614                         goto out;
6615         }
6616         ret = 0;
6617 out:
6618         btrfs_end_transaction(trans, extent_root);
6619         kfree(new_extents);
6620         kfree(ref_path);
6621         return ret;
6622 }
6623 #endif
6624
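     /*
      * figure out the allocation profile to use when a block group's
      * contents are rewritten, based on the number of writable devices:
      * on a single device, RAID0 collapses to single chunks and
      * RAID1/RAID10 become DUP; with more than one device, DUP is
      * upgraded to RAID1, single chunks become RAID0, and existing RAID
      * profiles are left alone.
      */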
6625 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6626 {
6627         u64 num_devices;
6628         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6629                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6630
6631         num_devices = root->fs_info->fs_devices->rw_devices;
6632         if (num_devices == 1) {
6633                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6634                 stripped = flags & ~stripped;
6635
6636                 /* turn raid0 into single device chunks */
6637                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6638                         return stripped;
6639
6640                 /* turn mirroring into duplication */
6641                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6642                              BTRFS_BLOCK_GROUP_RAID10))
6643                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6644                 return flags;
6645         } else {
6646                 /* they already had raid on here, just return */
6647                 if (flags & stripped)
6648                         return flags;
6649
6650                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6651                 stripped = flags & ~stripped;
6652
6653                 /* switch duplicated blocks with raid1 */
6654                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6655                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6656
6657                 /* turn single device chunks into raid0 */
6658                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6659         }
6660         return flags;
6661 }
6662
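     /*
      * pre-allocate a new chunk so the bytes used in @shrink_block_group
      * have somewhere to go during relocation.  The chunk is sized from
      * the group's used bytes (or its full size when the profile does not
      * change) plus a little slack, using the profile picked by
      * update_block_group_flags().
      */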
6663 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
6664                      struct btrfs_block_group_cache *shrink_block_group,
6665                      int force)
6666 {
6667         struct btrfs_trans_handle *trans;
6668         u64 new_alloc_flags;
6669         u64 calc;
6670
6671         spin_lock(&shrink_block_group->lock);
6672         if (btrfs_block_group_used(&shrink_block_group->item) +
6673             shrink_block_group->reserved > 0) {
6674                 spin_unlock(&shrink_block_group->lock);
6675
6676                 trans = btrfs_start_transaction(root, 1);
6677                 spin_lock(&shrink_block_group->lock);
6678
6679                 new_alloc_flags = update_block_group_flags(root,
6680                                                    shrink_block_group->flags);
6681                 if (new_alloc_flags != shrink_block_group->flags) {
6682                         calc =
6683                              btrfs_block_group_used(&shrink_block_group->item);
6684                 } else {
6685                         calc = shrink_block_group->key.offset;
6686                 }
6687                 spin_unlock(&shrink_block_group->lock);
6688
6689                 do_chunk_alloc(trans, root->fs_info->extent_root,
6690                                calc + 2 * 1024 * 1024, new_alloc_flags, force);
6691
6692                 btrfs_end_transaction(trans, root);
6693         } else
6694                 spin_unlock(&shrink_block_group->lock);
6695         return 0;
6696 }
6697
6698
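     /*
      * make sure there is space to relocate the contents of @group and
      * then mark it read only so no new allocations land in it while it
      * is being relocated.
      */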
6699 int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
6700                                          struct btrfs_block_group_cache *group)
6701
6702 {
6703         __alloc_chunk_for_shrink(root, group, 1);
6704         set_block_group_readonly(group);
6705         return 0;
6706 }
6707
6708 /*
6709  * checks to see if it's even possible to relocate this block group.
6710  *
6711  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
6712  * ok to go ahead and try.
6713  */
6714 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6715 {
6716         struct btrfs_block_group_cache *block_group;
6717         struct btrfs_space_info *space_info;
6718         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6719         struct btrfs_device *device;
6720         int full = 0;
6721         int ret = 0;
6722
6723         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
6724
6725         /* odd, couldn't find the block group, leave it alone */
6726         if (!block_group)
6727                 return -1;
6728
6729         /* no bytes used, we're good */
6730         if (!btrfs_block_group_used(&block_group->item))
6731                 goto out;
6732
6733         space_info = block_group->space_info;
6734         spin_lock(&space_info->lock);
6735
6736         full = space_info->full;
6737
6738         /*
6739          * if this is the last block group we have in this space, we can't
6740          * relocate it.
6741          */
6742         if (space_info->total_bytes == block_group->key.offset) {
6743                 ret = -1;
6744                 spin_unlock(&space_info->lock);
6745                 goto out;
6746         }
6747
6748         /*
6749          * need to make sure we have room in the space to handle all of the
6750          * extents from this block group.  If we can, we're good
6751          */
6752         if (space_info->bytes_used + space_info->bytes_reserved +
6753             space_info->bytes_pinned + space_info->bytes_readonly +
6754             btrfs_block_group_used(&block_group->item) <
6755             space_info->total_bytes) {
6756                 spin_unlock(&space_info->lock);
6757                 goto out;
6758         }
6759         spin_unlock(&space_info->lock);
6760
6761         /*
6762          * ok we don't have enough space, but maybe we have free space on our
6763          * devices to allocate new chunks for relocation, so loop through our
6764          * alloc devices and guess if we have enough space.  However, if we
6765          * were marked as full, then we know there aren't enough chunks, and we
6766          * can just return.
6767          */
6768         ret = -1;
6769         if (full)
6770                 goto out;
6771
6772         mutex_lock(&root->fs_info->chunk_mutex);
6773         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6774                 u64 min_free = btrfs_block_group_used(&block_group->item);
6775                 u64 dev_offset, max_avail;
6776
6777                 /*
6778                  * check to make sure we can actually find a chunk with enough
6779                  * space to fit our block group in.
6780                  */
6781                 if (device->total_bytes > device->bytes_used + min_free) {
6782                         ret = find_free_dev_extent(NULL, device, min_free,
6783                                                    &dev_offset, &max_avail);
6784                         if (!ret)
6785                                 break;
6786                         ret = -1;
6787                 }
6788         }
6789         mutex_unlock(&root->fs_info->chunk_mutex);
6790 out:
6791         btrfs_put_block_group(block_group);
6792         return ret;
6793 }
6794
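     /*
      * walk the extent tree starting at @key and position @path at the
      * first BLOCK_GROUP_ITEM whose objectid is >= key->objectid.
      * Returns 0 when one is found, -ENOENT when there are no more block
      * group items, or a negative error from the tree search.
      */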
6795 static int find_first_block_group(struct btrfs_root *root,
6796                 struct btrfs_path *path, struct btrfs_key *key)
6797 {
6798         int ret = 0;
6799         struct btrfs_key found_key;
6800         struct extent_buffer *leaf;
6801         int slot;
6802
6803         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6804         if (ret < 0)
6805                 goto out;
6806
6807         while (1) {
6808                 slot = path->slots[0];
6809                 leaf = path->nodes[0];
6810                 if (slot >= btrfs_header_nritems(leaf)) {
6811                         ret = btrfs_next_leaf(root, path);
6812                         if (ret == 0)
6813                                 continue;
6814                         if (ret < 0)
6815                                 goto out;
6816                         break;
6817                 }
6818                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6819
6820                 if (found_key.objectid >= key->objectid &&
6821                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6822                         ret = 0;
6823                         goto out;
6824                 }
6825                 path->slots[0]++;
6826         }
6827         ret = -ENOENT;
6828 out:
6829         return ret;
6830 }
6831
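     /*
      * tear down the in-memory block group caches, any caching controls
      * still queued, and the space_info structures.  Only used on the
      * unmount path once nothing else can touch them.
      */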
6832 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6833 {
6834         struct btrfs_block_group_cache *block_group;
6835         struct btrfs_space_info *space_info;
6836         struct btrfs_caching_control *caching_ctl;
6837         struct rb_node *n;
6838
6839         down_write(&info->extent_commit_sem);
6840         while (!list_empty(&info->caching_block_groups)) {
6841                 caching_ctl = list_entry(info->caching_block_groups.next,
6842                                          struct btrfs_caching_control, list);
6843                 list_del(&caching_ctl->list);
6844                 put_caching_control(caching_ctl);
6845         }
6846         up_write(&info->extent_commit_sem);
6847
6848         spin_lock(&info->block_group_cache_lock);
6849         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6850                 block_group = rb_entry(n, struct btrfs_block_group_cache,
6851                                        cache_node);
6852                 rb_erase(&block_group->cache_node,
6853                          &info->block_group_cache_tree);
6854                 spin_unlock(&info->block_group_cache_lock);
6855
6856                 down_write(&block_group->space_info->groups_sem);
6857                 list_del(&block_group->list);
6858                 up_write(&block_group->space_info->groups_sem);
6859
6860                 if (block_group->cached == BTRFS_CACHE_STARTED)
6861                         wait_block_group_cache_done(block_group);
6862
6863                 btrfs_remove_free_space_cache(block_group);
6864
6865                 WARN_ON(atomic_read(&block_group->count) != 1);
6866                 kfree(block_group);
6867
6868                 spin_lock(&info->block_group_cache_lock);
6869         }
6870         spin_unlock(&info->block_group_cache_lock);
6871
6872         /* now that all the block groups are freed, go through and
6873          * free all the space_info structs.  This is only called during
6874          * the final stages of unmount, and so we know nobody is
6875          * using them.  We call synchronize_rcu() once before we start,
6876          * just to be on the safe side.
6877          */
6878         synchronize_rcu();
6879
6880         while (!list_empty(&info->space_info)) {
6881                 space_info = list_entry(info->space_info.next,
6882                                         struct btrfs_space_info,
6883                                         list);
6884
6885                 list_del(&space_info->list);
6886                 kfree(space_info);
6887         }
6888         return 0;
6889 }
6890
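     /*
      * read every block group item out of the extent tree at mount time
      * and build the in-memory block group caches and space accounting
      * the allocator relies on.
      */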
6891 int btrfs_read_block_groups(struct btrfs_root *root)
6892 {
6893         struct btrfs_path *path;
6894         int ret;
6895         struct btrfs_block_group_cache *cache;
6896         struct btrfs_fs_info *info = root->fs_info;
6897         struct btrfs_space_info *space_info;
6898         struct btrfs_key key;
6899         struct btrfs_key found_key;
6900         struct extent_buffer *leaf;
6901
6902         root = info->extent_root;
6903         key.objectid = 0;
6904         key.offset = 0;
6905         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
6906         path = btrfs_alloc_path();
6907         if (!path)
6908                 return -ENOMEM;
6909
6910         while (1) {
6911                 ret = find_first_block_group(root, path, &key);
6912                 if (ret > 0) {
6913                         ret = 0;
6914                         goto error;
6915                 }
6916                 if (ret != 0)
6917                         goto error;
6918
6919                 leaf = path->nodes[0];
6920                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6921                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
6922                 if (!cache) {
6923                         ret = -ENOMEM;
6924                         break;
6925                 }
6926
6927                 atomic_set(&cache->count, 1);
6928                 spin_lock_init(&cache->lock);
6929                 spin_lock_init(&cache->tree_lock);
6930                 cache->fs_info = info;
6931                 INIT_LIST_HEAD(&cache->list);
6932                 INIT_LIST_HEAD(&cache->cluster_list);
6933
6934                 /*
6935                  * we only want to have 32k of ram per block group for keeping
6936                  * track of free space, and if we pass 1/2 of that we want to
6937                  * start converting things over to using bitmaps
6938                  */
6939                 cache->extents_thresh = ((1024 * 32) / 2) /
6940                         sizeof(struct btrfs_free_space);
6941
6942                 read_extent_buffer(leaf, &cache->item,
6943                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
6944                                    sizeof(cache->item));
6945                 memcpy(&cache->key, &found_key, sizeof(found_key));
6946
6947                 key.objectid = found_key.objectid + found_key.offset;
6948                 btrfs_release_path(root, path);
6949                 cache->flags = btrfs_block_group_flags(&cache->item);
6950                 cache->sectorsize = root->sectorsize;
6951
6952                 /*
6953                  * check for two cases, either we are full, and therefore
6954                  * don't need to bother with the caching work since we won't
6955                  * find any space, or we are empty, and we can just add all
6956                  * the space in and be done with it.  This saves us a lot of
6957                  * time, particularly in the full case.
6958                  */
6959                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
6960                         cache->last_byte_to_unpin = (u64)-1;
6961                         cache->cached = BTRFS_CACHE_FINISHED;
6962                 } else if (btrfs_block_group_used(&cache->item) == 0) {
6963                         exclude_super_stripes(root, cache);
6964                         cache->last_byte_to_unpin = (u64)-1;
6965                         cache->cached = BTRFS_CACHE_FINISHED;
6966                         add_new_free_space(cache, root->fs_info,
6967                                            found_key.objectid,
6968                                            found_key.objectid +
6969                                            found_key.offset);
6970                         free_excluded_extents(root, cache);
6971                 }
6972
6973                 ret = update_space_info(info, cache->flags, found_key.offset,
6974                                         btrfs_block_group_used(&cache->item),
6975                                         &space_info);
6976                 BUG_ON(ret);
6977                 cache->space_info = space_info;
6978                 down_write(&space_info->groups_sem);
6979                 list_add_tail(&cache->list, &space_info->block_groups);
6980                 up_write(&space_info->groups_sem);
6981
6982                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
6983                 BUG_ON(ret);
6984
6985                 set_avail_alloc_bits(root->fs_info, cache->flags);
6986                 if (btrfs_chunk_readonly(root, cache->key.objectid))
6987                         set_block_group_readonly(cache);
6988         }
6989         ret = 0;
6990 error:
6991         btrfs_free_path(path);
6992         return ret;
6993 }
6994
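     /*
      * create the in-memory cache and on-disk block group item for a
      * newly allocated chunk at @chunk_offset covering @size bytes.
      */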
6995 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
6996                            struct btrfs_root *root, u64 bytes_used,
6997                            u64 type, u64 chunk_objectid, u64 chunk_offset,
6998                            u64 size)
6999 {
7000         int ret;
7001         struct btrfs_root *extent_root;
7002         struct btrfs_block_group_cache *cache;
7003
7004         extent_root = root->fs_info->extent_root;
7005
7006         root->fs_info->last_trans_log_full_commit = trans->transid;
7007
7008         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7009         if (!cache)
7010                 return -ENOMEM;
7011
7012         cache->key.objectid = chunk_offset;
7013         cache->key.offset = size;
7014         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7015         cache->sectorsize = root->sectorsize;
7016
7017         /*
7018          * we only want to have 32k of ram per block group for keeping track
7019          * of free space, and if we pass 1/2 of that we want to start
7020          * converting things over to using bitmaps
7021          */
7022         cache->extents_thresh = ((1024 * 32) / 2) /
7023                 sizeof(struct btrfs_free_space);
7024         atomic_set(&cache->count, 1);
7025         spin_lock_init(&cache->lock);
7026         spin_lock_init(&cache->tree_lock);
7027         INIT_LIST_HEAD(&cache->list);
7028         INIT_LIST_HEAD(&cache->cluster_list);
7029
7030         btrfs_set_block_group_used(&cache->item, bytes_used);
7031         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7032         cache->flags = type;
7033         btrfs_set_block_group_flags(&cache->item, type);
7034
7035         cache->last_byte_to_unpin = (u64)-1;
7036         cache->cached = BTRFS_CACHE_FINISHED;
7037         exclude_super_stripes(root, cache);
7038
7039         add_new_free_space(cache, root->fs_info, chunk_offset,
7040                            chunk_offset + size);
7041
7042         free_excluded_extents(root, cache);
7043
7044         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7045                                 &cache->space_info);
7046         BUG_ON(ret);
7047         down_write(&cache->space_info->groups_sem);
7048         list_add_tail(&cache->list, &cache->space_info->block_groups);
7049         up_write(&cache->space_info->groups_sem);
7050
7051         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7052         BUG_ON(ret);
7053
7054         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7055                                 sizeof(cache->item));
7056         BUG_ON(ret);
7057
7058         set_avail_alloc_bits(extent_root->fs_info, type);
7059
7060         return 0;
7061 }
7062
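     /*
      * remove a block group that is expected to already be read only and
      * emptied by relocation: unlink it from the in-memory caches and
      * space accounting, then delete its item from the extent tree.
      */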
7063 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7064                              struct btrfs_root *root, u64 group_start)
7065 {
7066         struct btrfs_path *path;
7067         struct btrfs_block_group_cache *block_group;
7068         struct btrfs_free_cluster *cluster;
7069         struct btrfs_key key;
7070         int ret;
7071
7072         root = root->fs_info->extent_root;
7073
7074         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7075         BUG_ON(!block_group);
7076         BUG_ON(!block_group->ro);
7077
7078         memcpy(&key, &block_group->key, sizeof(key));
7079
7080         /* make sure this block group isn't part of an allocation cluster */
7081         cluster = &root->fs_info->data_alloc_cluster;
7082         spin_lock(&cluster->refill_lock);
7083         btrfs_return_cluster_to_free_space(block_group, cluster);
7084         spin_unlock(&cluster->refill_lock);
7085
7086         /*
7087          * make sure this block group isn't part of a metadata
7088          * allocation cluster
7089          */
7090         cluster = &root->fs_info->meta_alloc_cluster;
7091         spin_lock(&cluster->refill_lock);
7092         btrfs_return_cluster_to_free_space(block_group, cluster);
7093         spin_unlock(&cluster->refill_lock);
7094
7095         path = btrfs_alloc_path();
7096         BUG_ON(!path);
7097
7098         spin_lock(&root->fs_info->block_group_cache_lock);
7099         rb_erase(&block_group->cache_node,
7100                  &root->fs_info->block_group_cache_tree);
7101         spin_unlock(&root->fs_info->block_group_cache_lock);
7102
7103         down_write(&block_group->space_info->groups_sem);
7104         /*
7105          * we must use list_del_init so people can check to see if they
7106          * are still on the list after taking the semaphore
7107          */
7108         list_del_init(&block_group->list);
7109         up_write(&block_group->space_info->groups_sem);
7110
7111         if (block_group->cached == BTRFS_CACHE_STARTED)
7112                 wait_block_group_cache_done(block_group);
7113
7114         btrfs_remove_free_space_cache(block_group);
7115
7116         spin_lock(&block_group->space_info->lock);
7117         block_group->space_info->total_bytes -= block_group->key.offset;
7118         block_group->space_info->bytes_readonly -= block_group->key.offset;
7119         spin_unlock(&block_group->space_info->lock);
7120
7121         btrfs_clear_space_info_full(root->fs_info);
7122
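             /*
              * one put for the reference taken by btrfs_lookup_block_group()
              * above, and one for the reference the group has held since it
              * was created and added to the block group cache.
              */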
7123         btrfs_put_block_group(block_group);
7124         btrfs_put_block_group(block_group);
7125
7126         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7127         if (ret > 0)
7128                 ret = -EIO;
7129         if (ret < 0)
7130                 goto out;
7131
7132         ret = btrfs_del_item(trans, root, path);
7133 out:
7134         btrfs_free_path(path);
7135         return ret;
7136 }