fs/btrfs/extent-tree.c (linux-2.6-block.git, commit 0e32abf53b5bf2a7e6482271d8e9c00115d19ec6)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39
40 #undef SCRAMBLE_DELAYED_REFS
41
42 /*
43  * control flags for do_chunk_alloc's force field
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try to allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks.
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one.
54  *
55  */
56 enum {
57         CHUNK_ALLOC_NO_FORCE = 0,
58         CHUNK_ALLOC_LIMITED = 1,
59         CHUNK_ALLOC_FORCE = 2,
60 };
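
/*
 * Illustrative sketch (editor's example, not authoritative): how these force
 * levels are passed to do_chunk_alloc(), which is forward-declared below.
 * Here "flags" stands in for a block group type chosen by the caller, such
 * as BTRFS_BLOCK_GROUP_METADATA.
 *
 *	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_NO_FORCE);
 *	...
 *	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 */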
61
62 /*
63  * Control how reservations are dealt with.
64  *
65  * RESERVE_FREE - freeing a reservation.
66  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67  *   ENOSPC accounting
68  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69  *   bytes_may_use as the ENOSPC accounting is done elsewhere
70  */
71 enum {
72         RESERVE_FREE = 0,
73         RESERVE_ALLOC = 1,
74         RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
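
/*
 * Illustrative sketch (editor's example, not authoritative): these values are
 * the "reserve" argument of btrfs_update_reserved_bytes(), forward-declared
 * below.  Here cache, num_bytes and delalloc stand in for the caller's block
 * group, byte count and delalloc flag.
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, delalloc);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, delalloc);
 */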
76
77 static int update_block_group(struct btrfs_trans_handle *trans,
78                               struct btrfs_root *root, u64 bytenr,
79                               u64 num_bytes, int alloc);
80 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
81                                 struct btrfs_root *root,
82                                 struct btrfs_delayed_ref_node *node, u64 parent,
83                                 u64 root_objectid, u64 owner_objectid,
84                                 u64 owner_offset, int refs_to_drop,
85                                 struct btrfs_delayed_extent_op *extra_op);
86 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
87                                     struct extent_buffer *leaf,
88                                     struct btrfs_extent_item *ei);
89 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
90                                       struct btrfs_root *root,
91                                       u64 parent, u64 root_objectid,
92                                       u64 flags, u64 owner, u64 offset,
93                                       struct btrfs_key *ins, int ref_mod);
94 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
95                                      struct btrfs_root *root,
96                                      u64 parent, u64 root_objectid,
97                                      u64 flags, struct btrfs_disk_key *key,
98                                      int level, struct btrfs_key *ins,
99                                      int no_quota);
100 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
101                           struct btrfs_root *extent_root, u64 flags,
102                           int force);
103 static int find_next_key(struct btrfs_path *path, int level,
104                          struct btrfs_key *key);
105 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106                             int dump_block_groups);
107 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
108                                        u64 num_bytes, int reserve,
109                                        int delalloc);
110 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111                                u64 num_bytes);
112 int btrfs_pin_extent(struct btrfs_root *root,
113                      u64 bytenr, u64 num_bytes, int reserved);
114
115 static noinline int
116 block_group_cache_done(struct btrfs_block_group_cache *cache)
117 {
118         smp_mb();
119         return cache->cached == BTRFS_CACHE_FINISHED ||
120                 cache->cached == BTRFS_CACHE_ERROR;
121 }
122
123 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 {
125         return (cache->flags & bits) == bits;
126 }
127
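/*
 * Take a reference on a block group cache entry.  Every call must be balanced
 * by btrfs_put_block_group() below, which frees the entry once the reference
 * count drops to zero.
 */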
128 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
129 {
130         atomic_inc(&cache->count);
131 }
132
133 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
134 {
135         if (atomic_dec_and_test(&cache->count)) {
136                 WARN_ON(cache->pinned > 0);
137                 WARN_ON(cache->reserved > 0);
138                 kfree(cache->free_space_ctl);
139                 kfree(cache);
140         }
141 }
142
143 /*
144  * this adds the block group to the fs_info rb tree for the block group
145  * cache
146  */
147 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
148                                 struct btrfs_block_group_cache *block_group)
149 {
150         struct rb_node **p;
151         struct rb_node *parent = NULL;
152         struct btrfs_block_group_cache *cache;
153
154         spin_lock(&info->block_group_cache_lock);
155         p = &info->block_group_cache_tree.rb_node;
156
157         while (*p) {
158                 parent = *p;
159                 cache = rb_entry(parent, struct btrfs_block_group_cache,
160                                  cache_node);
161                 if (block_group->key.objectid < cache->key.objectid) {
162                         p = &(*p)->rb_left;
163                 } else if (block_group->key.objectid > cache->key.objectid) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         spin_unlock(&info->block_group_cache_lock);
167                         return -EEXIST;
168                 }
169         }
170
171         rb_link_node(&block_group->cache_node, parent, p);
172         rb_insert_color(&block_group->cache_node,
173                         &info->block_group_cache_tree);
174
175         if (info->first_logical_byte > block_group->key.objectid)
176                 info->first_logical_byte = block_group->key.objectid;
177
178         spin_unlock(&info->block_group_cache_lock);
179
180         return 0;
181 }
182
183 /*
184  * This will return the block group at or after bytenr if contains is 0, else
185  * it will return the block group that contains the bytenr
186  */
187 static struct btrfs_block_group_cache *
188 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
189                               int contains)
190 {
191         struct btrfs_block_group_cache *cache, *ret = NULL;
192         struct rb_node *n;
193         u64 end, start;
194
195         spin_lock(&info->block_group_cache_lock);
196         n = info->block_group_cache_tree.rb_node;
197
198         while (n) {
199                 cache = rb_entry(n, struct btrfs_block_group_cache,
200                                  cache_node);
201                 end = cache->key.objectid + cache->key.offset - 1;
202                 start = cache->key.objectid;
203
204                 if (bytenr < start) {
205                         if (!contains && (!ret || start < ret->key.objectid))
206                                 ret = cache;
207                         n = n->rb_left;
208                 } else if (bytenr > start) {
209                         if (contains && bytenr <= end) {
210                                 ret = cache;
211                                 break;
212                         }
213                         n = n->rb_right;
214                 } else {
215                         ret = cache;
216                         break;
217                 }
218         }
219         if (ret) {
220                 btrfs_get_block_group(ret);
221                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
222                         info->first_logical_byte = ret->key.objectid;
223         }
224         spin_unlock(&info->block_group_cache_lock);
225
226         return ret;
227 }
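
/*
 * Usage illustration (editor's sketch) of the two lookup modes implemented
 * above, mirrored by the public wrappers further down in this file:
 *
 *	bg = block_group_cache_tree_search(info, bytenr, 0);
 *		block group starting at or after bytenr
 *		(see btrfs_lookup_first_block_group)
 *
 *	bg = block_group_cache_tree_search(info, bytenr, 1);
 *		block group that contains bytenr
 *		(see btrfs_lookup_block_group)
 *
 * In both cases the caller receives a reference and must drop it with
 * btrfs_put_block_group() when done.
 */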
228
229 static int add_excluded_extent(struct btrfs_root *root,
230                                u64 start, u64 num_bytes)
231 {
232         u64 end = start + num_bytes - 1;
233         set_extent_bits(&root->fs_info->freed_extents[0],
234                         start, end, EXTENT_UPTODATE, GFP_NOFS);
235         set_extent_bits(&root->fs_info->freed_extents[1],
236                         start, end, EXTENT_UPTODATE, GFP_NOFS);
237         return 0;
238 }
239
240 static void free_excluded_extents(struct btrfs_root *root,
241                                   struct btrfs_block_group_cache *cache)
242 {
243         u64 start, end;
244
245         start = cache->key.objectid;
246         end = start + cache->key.offset - 1;
247
248         clear_extent_bits(&root->fs_info->freed_extents[0],
249                           start, end, EXTENT_UPTODATE, GFP_NOFS);
250         clear_extent_bits(&root->fs_info->freed_extents[1],
251                           start, end, EXTENT_UPTODATE, GFP_NOFS);
252 }
253
254 static int exclude_super_stripes(struct btrfs_root *root,
255                                  struct btrfs_block_group_cache *cache)
256 {
257         u64 bytenr;
258         u64 *logical;
259         int stripe_len;
260         int i, nr, ret;
261
262         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
263                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
264                 cache->bytes_super += stripe_len;
265                 ret = add_excluded_extent(root, cache->key.objectid,
266                                           stripe_len);
267                 if (ret)
268                         return ret;
269         }
270
271         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
272                 bytenr = btrfs_sb_offset(i);
273                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
274                                        cache->key.objectid, bytenr,
275                                        0, &logical, &nr, &stripe_len);
276                 if (ret)
277                         return ret;
278
279                 while (nr--) {
280                         u64 start, len;
281
282                         if (logical[nr] > cache->key.objectid +
283                             cache->key.offset)
284                                 continue;
285
286                         if (logical[nr] + stripe_len <= cache->key.objectid)
287                                 continue;
288
289                         start = logical[nr];
290                         if (start < cache->key.objectid) {
291                                 start = cache->key.objectid;
292                                 len = (logical[nr] + stripe_len) - start;
293                         } else {
294                                 len = min_t(u64, stripe_len,
295                                             cache->key.objectid +
296                                             cache->key.offset - start);
297                         }
298
299                         cache->bytes_super += len;
300                         ret = add_excluded_extent(root, start, len);
301                         if (ret) {
302                                 kfree(logical);
303                                 return ret;
304                         }
305                 }
306
307                 kfree(logical);
308         }
309         return 0;
310 }
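
/*
 * Note (editor's illustration): the ranges excluded above are the superblock
 * copies reported by btrfs_sb_offset().  The copies sit at fixed physical
 * offsets (64KiB for the primary, with mirrors at larger offsets);
 * btrfs_rmap_block() translates them to logical ranges inside this block
 * group so the overlap is accounted in cache->bytes_super and kept out of
 * the free space accounting.
 */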
311
312 static struct btrfs_caching_control *
313 get_caching_control(struct btrfs_block_group_cache *cache)
314 {
315         struct btrfs_caching_control *ctl;
316
317         spin_lock(&cache->lock);
318         if (!cache->caching_ctl) {
319                 spin_unlock(&cache->lock);
320                 return NULL;
321         }
322
323         ctl = cache->caching_ctl;
324         atomic_inc(&ctl->count);
325         spin_unlock(&cache->lock);
326         return ctl;
327 }
328
329 static void put_caching_control(struct btrfs_caching_control *ctl)
330 {
331         if (atomic_dec_and_test(&ctl->count))
332                 kfree(ctl);
333 }
334
335 #ifdef CONFIG_BTRFS_DEBUG
336 static void fragment_free_space(struct btrfs_root *root,
337                                 struct btrfs_block_group_cache *block_group)
338 {
339         u64 start = block_group->key.objectid;
340         u64 len = block_group->key.offset;
341         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
342                 root->nodesize : root->sectorsize;
343         u64 step = chunk << 1;
344
345         while (len > chunk) {
346                 btrfs_remove_free_space(block_group, start, chunk);
347                 start += step;
348                 if (len < step)
349                         len = 0;
350                 else
351                         len -= step;
352         }
353 }
354 #endif
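
/*
 * Sketch (editor's illustration) of the free space layout that
 * fragment_free_space() produces when debug fragmentation is enabled: with a
 * chunk size C (nodesize for metadata, sectorsize for data) it removes every
 * other C-sized piece, leaving
 *
 *	[ removed | kept  | removed | kept  | ... ]
 *	  0..C      C..2C   2C..3C    3C..4C
 *
 * so roughly half of the group stays allocatable, in C-sized holes.
 */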
355
356 /*
357  * This is only called by the block group caching code.  Since we could have
358  * freed extents, we need to check the pinned_extents for any extents that
359  * can't be used yet, since their free space is released only at transaction commit.
360  */
361 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
362                               struct btrfs_fs_info *info, u64 start, u64 end)
363 {
364         u64 extent_start, extent_end, size, total_added = 0;
365         int ret;
366
367         while (start < end) {
368                 ret = find_first_extent_bit(info->pinned_extents, start,
369                                             &extent_start, &extent_end,
370                                             EXTENT_DIRTY | EXTENT_UPTODATE,
371                                             NULL);
372                 if (ret)
373                         break;
374
375                 if (extent_start <= start) {
376                         start = extent_end + 1;
377                 } else if (extent_start > start && extent_start < end) {
378                         size = extent_start - start;
379                         total_added += size;
380                         ret = btrfs_add_free_space(block_group, start,
381                                                    size);
382                         BUG_ON(ret); /* -ENOMEM or logic error */
383                         start = extent_end + 1;
384                 } else {
385                         break;
386                 }
387         }
388
389         if (start < end) {
390                 size = end - start;
391                 total_added += size;
392                 ret = btrfs_add_free_space(block_group, start, size);
393                 BUG_ON(ret); /* -ENOMEM or logic error */
394         }
395
396         return total_added;
397 }
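
/*
 * Worked example (editor's illustration) for the routine above: scanning the
 * range [0, 1M) while pinned_extents holds a single pinned range returned as
 * [256K, 320K - 1]:
 *
 *	- the gap [0, 256K) is passed to btrfs_add_free_space() (256K added)
 *	- start advances to extent_end + 1 = 320K
 *	- no further pinned bits are found, so the tail [320K, 1M) is added
 *	  as well (704K)
 *
 * total_added comes back as 960K; the pinned 64K only becomes usable after
 * the transaction commits and the pinned extents are released.
 */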
398
399 static noinline void caching_thread(struct btrfs_work *work)
400 {
401         struct btrfs_block_group_cache *block_group;
402         struct btrfs_fs_info *fs_info;
403         struct btrfs_caching_control *caching_ctl;
404         struct btrfs_root *extent_root;
405         struct btrfs_path *path;
406         struct extent_buffer *leaf;
407         struct btrfs_key key;
408         u64 total_found = 0;
409         u64 last = 0;
410         u32 nritems;
411         int ret = -ENOMEM;
412         bool wakeup = true;
413
414         caching_ctl = container_of(work, struct btrfs_caching_control, work);
415         block_group = caching_ctl->block_group;
416         fs_info = block_group->fs_info;
417         extent_root = fs_info->extent_root;
418
419         path = btrfs_alloc_path();
420         if (!path)
421                 goto out;
422
423         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
424
425 #ifdef CONFIG_BTRFS_DEBUG
426         /*
427          * If we're fragmenting we don't want to make anybody think we can
428          * allocate from this block group until we've had a chance to fragment
429          * the free space.
430          */
431         if (btrfs_should_fragment_free_space(extent_root, block_group))
432                 wakeup = false;
433 #endif
434         /*
435          * We don't want to deadlock with somebody trying to allocate a new
436          * extent for the extent root while also trying to search the extent
437          * root to add free space.  So we skip locking and search the commit
438  * root, since it's read-only.
439          */
440         path->skip_locking = 1;
441         path->search_commit_root = 1;
442         path->reada = 1;
443
444         key.objectid = last;
445         key.offset = 0;
446         key.type = BTRFS_EXTENT_ITEM_KEY;
447 again:
448         mutex_lock(&caching_ctl->mutex);
449         /* need to make sure the commit_root doesn't disappear */
450         down_read(&fs_info->commit_root_sem);
451
452 next:
453         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
454         if (ret < 0)
455                 goto err;
456
457         leaf = path->nodes[0];
458         nritems = btrfs_header_nritems(leaf);
459
460         while (1) {
461                 if (btrfs_fs_closing(fs_info) > 1) {
462                         last = (u64)-1;
463                         break;
464                 }
465
466                 if (path->slots[0] < nritems) {
467                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
468                 } else {
469                         ret = find_next_key(path, 0, &key);
470                         if (ret)
471                                 break;
472
473                         if (need_resched() ||
474                             rwsem_is_contended(&fs_info->commit_root_sem)) {
475                                 if (wakeup)
476                                         caching_ctl->progress = last;
477                                 btrfs_release_path(path);
478                                 up_read(&fs_info->commit_root_sem);
479                                 mutex_unlock(&caching_ctl->mutex);
480                                 cond_resched();
481                                 goto again;
482                         }
483
484                         ret = btrfs_next_leaf(extent_root, path);
485                         if (ret < 0)
486                                 goto err;
487                         if (ret)
488                                 break;
489                         leaf = path->nodes[0];
490                         nritems = btrfs_header_nritems(leaf);
491                         continue;
492                 }
493
494                 if (key.objectid < last) {
495                         key.objectid = last;
496                         key.offset = 0;
497                         key.type = BTRFS_EXTENT_ITEM_KEY;
498
499                         if (wakeup)
500                                 caching_ctl->progress = last;
501                         btrfs_release_path(path);
502                         goto next;
503                 }
504
505                 if (key.objectid < block_group->key.objectid) {
506                         path->slots[0]++;
507                         continue;
508                 }
509
510                 if (key.objectid >= block_group->key.objectid +
511                     block_group->key.offset)
512                         break;
513
514                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
515                     key.type == BTRFS_METADATA_ITEM_KEY) {
516                         total_found += add_new_free_space(block_group,
517                                                           fs_info, last,
518                                                           key.objectid);
519                         if (key.type == BTRFS_METADATA_ITEM_KEY)
520                                 last = key.objectid +
521                                         fs_info->tree_root->nodesize;
522                         else
523                                 last = key.objectid + key.offset;
524
525                         if (total_found > (1024 * 1024 * 2)) {
526                                 total_found = 0;
527                                 if (wakeup)
528                                         wake_up(&caching_ctl->wait);
529                         }
530                 }
531                 path->slots[0]++;
532         }
533         ret = 0;
534
535         total_found += add_new_free_space(block_group, fs_info, last,
536                                           block_group->key.objectid +
537                                           block_group->key.offset);
538         spin_lock(&block_group->lock);
539         block_group->caching_ctl = NULL;
540         block_group->cached = BTRFS_CACHE_FINISHED;
541         spin_unlock(&block_group->lock);
542
543 #ifdef CONFIG_BTRFS_DEBUG
544         if (btrfs_should_fragment_free_space(extent_root, block_group)) {
545                 u64 bytes_used;
546
547                 spin_lock(&block_group->space_info->lock);
548                 spin_lock(&block_group->lock);
549                 bytes_used = block_group->key.offset -
550                         btrfs_block_group_used(&block_group->item);
551                 block_group->space_info->bytes_used += bytes_used >> 1;
552                 spin_unlock(&block_group->lock);
553                 spin_unlock(&block_group->space_info->lock);
554                 fragment_free_space(extent_root, block_group);
555         }
556 #endif
557
558         caching_ctl->progress = (u64)-1;
559 err:
560         btrfs_free_path(path);
561         up_read(&fs_info->commit_root_sem);
562
563         free_excluded_extents(extent_root, block_group);
564
565         mutex_unlock(&caching_ctl->mutex);
566 out:
567         if (ret) {
568                 spin_lock(&block_group->lock);
569                 block_group->caching_ctl = NULL;
570                 block_group->cached = BTRFS_CACHE_ERROR;
571                 spin_unlock(&block_group->lock);
572         }
573         wake_up(&caching_ctl->wait);
574
575         put_caching_control(caching_ctl);
576         btrfs_put_block_group(block_group);
577 }
578
579 static int cache_block_group(struct btrfs_block_group_cache *cache,
580                              int load_cache_only)
581 {
582         DEFINE_WAIT(wait);
583         struct btrfs_fs_info *fs_info = cache->fs_info;
584         struct btrfs_caching_control *caching_ctl;
585         int ret = 0;
586
587         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
588         if (!caching_ctl)
589                 return -ENOMEM;
590
591         INIT_LIST_HEAD(&caching_ctl->list);
592         mutex_init(&caching_ctl->mutex);
593         init_waitqueue_head(&caching_ctl->wait);
594         caching_ctl->block_group = cache;
595         caching_ctl->progress = cache->key.objectid;
596         atomic_set(&caching_ctl->count, 1);
597         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
598                         caching_thread, NULL, NULL);
599
600         spin_lock(&cache->lock);
601         /*
602          * This should be a rare occasion, but it could happen in the
603          * case where one thread starts to load the space cache info, and then
604          * some other thread starts a transaction commit which tries to do an
605          * allocation while the other thread is still loading the space cache
606          * info.  The previous loop should have kept us from choosing this block
607          * group, but if we've moved to the state where we will wait on caching
608          * block groups we need to first check if we're doing a fast load here,
609          * so we can wait for it to finish, otherwise we could end up allocating
610          * from a block group whose cache gets evicted for one reason or
611          * another.
612          */
613         while (cache->cached == BTRFS_CACHE_FAST) {
614                 struct btrfs_caching_control *ctl;
615
616                 ctl = cache->caching_ctl;
617                 atomic_inc(&ctl->count);
618                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
619                 spin_unlock(&cache->lock);
620
621                 schedule();
622
623                 finish_wait(&ctl->wait, &wait);
624                 put_caching_control(ctl);
625                 spin_lock(&cache->lock);
626         }
627
628         if (cache->cached != BTRFS_CACHE_NO) {
629                 spin_unlock(&cache->lock);
630                 kfree(caching_ctl);
631                 return 0;
632         }
633         WARN_ON(cache->caching_ctl);
634         cache->caching_ctl = caching_ctl;
635         cache->cached = BTRFS_CACHE_FAST;
636         spin_unlock(&cache->lock);
637
638         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
639                 mutex_lock(&caching_ctl->mutex);
640                 ret = load_free_space_cache(fs_info, cache);
641
642                 spin_lock(&cache->lock);
643                 if (ret == 1) {
644                         cache->caching_ctl = NULL;
645                         cache->cached = BTRFS_CACHE_FINISHED;
646                         cache->last_byte_to_unpin = (u64)-1;
647                         caching_ctl->progress = (u64)-1;
648                 } else {
649                         if (load_cache_only) {
650                                 cache->caching_ctl = NULL;
651                                 cache->cached = BTRFS_CACHE_NO;
652                         } else {
653                                 cache->cached = BTRFS_CACHE_STARTED;
654                                 cache->has_caching_ctl = 1;
655                         }
656                 }
657                 spin_unlock(&cache->lock);
658 #ifdef CONFIG_BTRFS_DEBUG
659                 if (ret == 1 &&
660                     btrfs_should_fragment_free_space(fs_info->extent_root,
661                                                      cache)) {
662                         u64 bytes_used;
663
664                         spin_lock(&cache->space_info->lock);
665                         spin_lock(&cache->lock);
666                         bytes_used = cache->key.offset -
667                                 btrfs_block_group_used(&cache->item);
668                         cache->space_info->bytes_used += bytes_used >> 1;
669                         spin_unlock(&cache->lock);
670                         spin_unlock(&cache->space_info->lock);
671                         fragment_free_space(fs_info->extent_root, cache);
672                 }
673 #endif
674                 mutex_unlock(&caching_ctl->mutex);
675
676                 wake_up(&caching_ctl->wait);
677                 if (ret == 1) {
678                         put_caching_control(caching_ctl);
679                         free_excluded_extents(fs_info->extent_root, cache);
680                         return 0;
681                 }
682         } else {
683                 /*
684                  * We are not going to do the fast caching, set cached to the
685                  * appropriate value and wake up any waiters.
686                  */
687                 spin_lock(&cache->lock);
688                 if (load_cache_only) {
689                         cache->caching_ctl = NULL;
690                         cache->cached = BTRFS_CACHE_NO;
691                 } else {
692                         cache->cached = BTRFS_CACHE_STARTED;
693                         cache->has_caching_ctl = 1;
694                 }
695                 spin_unlock(&cache->lock);
696                 wake_up(&caching_ctl->wait);
697         }
698
699         if (load_cache_only) {
700                 put_caching_control(caching_ctl);
701                 return 0;
702         }
703
704         down_write(&fs_info->commit_root_sem);
705         atomic_inc(&caching_ctl->count);
706         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
707         up_write(&fs_info->commit_root_sem);
708
709         btrfs_get_block_group(cache);
710
711         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
712
713         return ret;
714 }
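
/*
 * Summary (editor's illustration) of the cache->cached transitions driven by
 * cache_block_group() above:
 *
 *	BTRFS_CACHE_NO -> BTRFS_CACHE_FAST        (try the on-disk space cache)
 *	BTRFS_CACHE_FAST -> BTRFS_CACHE_FINISHED  (space cache load succeeded)
 *	BTRFS_CACHE_FAST -> BTRFS_CACHE_NO        (load failed, load_cache_only)
 *	BTRFS_CACHE_FAST -> BTRFS_CACHE_STARTED   (fall back to caching_thread)
 *	BTRFS_CACHE_STARTED -> BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR
 *	                       (set at the end of caching_thread())
 */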
715
716 /*
717  * return the block group that starts at or after bytenr
718  */
719 static struct btrfs_block_group_cache *
720 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
721 {
722         struct btrfs_block_group_cache *cache;
723
724         cache = block_group_cache_tree_search(info, bytenr, 0);
725
726         return cache;
727 }
728
729 /*
730  * return the block group that contains the given bytenr
731  */
732 struct btrfs_block_group_cache *btrfs_lookup_block_group(
733                                                  struct btrfs_fs_info *info,
734                                                  u64 bytenr)
735 {
736         struct btrfs_block_group_cache *cache;
737
738         cache = block_group_cache_tree_search(info, bytenr, 1);
739
740         return cache;
741 }
742
743 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
744                                                   u64 flags)
745 {
746         struct list_head *head = &info->space_info;
747         struct btrfs_space_info *found;
748
749         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
750
751         rcu_read_lock();
752         list_for_each_entry_rcu(found, head, list) {
753                 if (found->flags & flags) {
754                         rcu_read_unlock();
755                         return found;
756                 }
757         }
758         rcu_read_unlock();
759         return NULL;
760 }
761
762 /*
763  * after adding space to the filesystem, we need to clear the full flags
764  * on all the space infos.
765  */
766 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
767 {
768         struct list_head *head = &info->space_info;
769         struct btrfs_space_info *found;
770
771         rcu_read_lock();
772         list_for_each_entry_rcu(found, head, list)
773                 found->full = 0;
774         rcu_read_unlock();
775 }
776
777 /* simple helper to search for an existing data extent at a given offset */
778 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
779 {
780         int ret;
781         struct btrfs_key key;
782         struct btrfs_path *path;
783
784         path = btrfs_alloc_path();
785         if (!path)
786                 return -ENOMEM;
787
788         key.objectid = start;
789         key.offset = len;
790         key.type = BTRFS_EXTENT_ITEM_KEY;
791         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
792                                 0, 0);
793         btrfs_free_path(path);
794         return ret;
795 }
796
797 /*
798  * Helper function to look up the reference count and flags of a tree block.
799  *
800  * The head node for a delayed ref is used to store the sum of all the
801  * reference count modifications queued up in the rbtree.  The head
802  * node may also store the extent flags to set.  This way you can check
803  * to see what the reference count and extent flags will be once all of
804  * the delayed refs have been processed.
805  */
806 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
807                              struct btrfs_root *root, u64 bytenr,
808                              u64 offset, int metadata, u64 *refs, u64 *flags)
809 {
810         struct btrfs_delayed_ref_head *head;
811         struct btrfs_delayed_ref_root *delayed_refs;
812         struct btrfs_path *path;
813         struct btrfs_extent_item *ei;
814         struct extent_buffer *leaf;
815         struct btrfs_key key;
816         u32 item_size;
817         u64 num_refs;
818         u64 extent_flags;
819         int ret;
820
821         /*
822          * If we don't have skinny metadata, don't bother doing anything
823          * different
824          */
825         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
826                 offset = root->nodesize;
827                 metadata = 0;
828         }
829
830         path = btrfs_alloc_path();
831         if (!path)
832                 return -ENOMEM;
833
834         if (!trans) {
835                 path->skip_locking = 1;
836                 path->search_commit_root = 1;
837         }
838
839 search_again:
840         key.objectid = bytenr;
841         key.offset = offset;
842         if (metadata)
843                 key.type = BTRFS_METADATA_ITEM_KEY;
844         else
845                 key.type = BTRFS_EXTENT_ITEM_KEY;
846
847         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
848                                 &key, path, 0, 0);
849         if (ret < 0)
850                 goto out_free;
851
852         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
853                 if (path->slots[0]) {
854                         path->slots[0]--;
855                         btrfs_item_key_to_cpu(path->nodes[0], &key,
856                                               path->slots[0]);
857                         if (key.objectid == bytenr &&
858                             key.type == BTRFS_EXTENT_ITEM_KEY &&
859                             key.offset == root->nodesize)
860                                 ret = 0;
861                 }
862         }
863
864         if (ret == 0) {
865                 leaf = path->nodes[0];
866                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
867                 if (item_size >= sizeof(*ei)) {
868                         ei = btrfs_item_ptr(leaf, path->slots[0],
869                                             struct btrfs_extent_item);
870                         num_refs = btrfs_extent_refs(leaf, ei);
871                         extent_flags = btrfs_extent_flags(leaf, ei);
872                 } else {
873 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
874                         struct btrfs_extent_item_v0 *ei0;
875                         BUG_ON(item_size != sizeof(*ei0));
876                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
877                                              struct btrfs_extent_item_v0);
878                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
879                         /* FIXME: this isn't correct for data */
880                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
881 #else
882                         BUG();
883 #endif
884                 }
885                 BUG_ON(num_refs == 0);
886         } else {
887                 num_refs = 0;
888                 extent_flags = 0;
889                 ret = 0;
890         }
891
892         if (!trans)
893                 goto out;
894
895         delayed_refs = &trans->transaction->delayed_refs;
896         spin_lock(&delayed_refs->lock);
897         head = btrfs_find_delayed_ref_head(trans, bytenr);
898         if (head) {
899                 if (!mutex_trylock(&head->mutex)) {
900                         atomic_inc(&head->node.refs);
901                         spin_unlock(&delayed_refs->lock);
902
903                         btrfs_release_path(path);
904
905                         /*
906                          * Mutex was contended, block until it's released and try
907                          * again
908                          */
909                         mutex_lock(&head->mutex);
910                         mutex_unlock(&head->mutex);
911                         btrfs_put_delayed_ref(&head->node);
912                         goto search_again;
913                 }
914                 spin_lock(&head->lock);
915                 if (head->extent_op && head->extent_op->update_flags)
916                         extent_flags |= head->extent_op->flags_to_set;
917                 else
918                         BUG_ON(num_refs == 0);
919
920                 num_refs += head->node.ref_mod;
921                 spin_unlock(&head->lock);
922                 mutex_unlock(&head->mutex);
923         }
924         spin_unlock(&delayed_refs->lock);
925 out:
926         WARN_ON(num_refs == 0);
927         if (refs)
928                 *refs = num_refs;
929         if (flags)
930                 *flags = extent_flags;
931 out_free:
932         btrfs_free_path(path);
933         return ret;
934 }
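
/*
 * Usage sketch (editor's example, not authoritative): the count returned
 * above is effectively
 *
 *	on-disk btrfs_extent_refs() + head->node.ref_mod of the delayed ref head
 *
 * A typical call for a tree block looks like
 *
 *	ret = btrfs_lookup_extent_info(trans, root, bytenr, level, 1,
 *				       &refs, &flags);
 *
 * where "level" is used as the METADATA_ITEM key offset when skinny metadata
 * is enabled, and is replaced by nodesize internally otherwise.
 */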
935
936 /*
937  * Back reference rules.  Back refs have three main goals:
938  *
939  * 1) differentiate between all holders of references to an extent so that
940  *    when a reference is dropped we can make sure it was a valid reference
941  *    before freeing the extent.
942  *
943  * 2) Provide enough information to quickly find the holders of an extent
944  *    if we notice a given block is corrupted or bad.
945  *
946  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
947  *    maintenance.  This is actually the same as #2, but with a slightly
948  *    different use case.
949  *
950  * There are two kinds of back refs. The implicit back refs is optimized
951  * for pointers in non-shared tree blocks. For a given pointer in a block,
952  * back refs of this kind provide information about the block's owner tree
953  * and the pointer's key. This information allows us to find the block by
954  * b-tree searching. The full back refs is for pointers in tree blocks not
955  * referenced by their owner trees. The location of the tree block is recorded
956  * in the back refs. Actually the full back refs is generic, and can be
957  * used in all cases the implicit back refs is used. The major shortcoming
958  * of the full back refs is its overhead. Every time a tree block gets
959  * COWed, we have to update back refs entry for all pointers in it.
960  *
961  * For a newly allocated tree block, we use implicit back refs for
962  * pointers in it. This means most tree related operations only involve
963  * implicit back refs. For a tree block created in old transaction, the
964  * only way to drop a reference to it is COW it. So we can detect the
965  * event that tree block loses its owner tree's reference and do the
966  * back refs conversion.
967  *
968  * When a tree block is COW'd through a tree, there are four cases:
969  *
970  * The reference count of the block is one and the tree is the block's
971  * owner tree. Nothing to do in this case.
972  *
973  * The reference count of the block is one and the tree is not the
974  * block's owner tree. In this case, full back refs is used for pointers
975  * in the block. Remove these full back refs, add implicit back refs for
976  * every pointers in the new block.
977  *
978  * The reference count of the block is greater than one and the tree is
979  * the block's owner tree. In this case, implicit back refs is used for
980  * pointers in the block. Add full back refs for every pointer in the
981  * block, increase lower level extents' reference counts. The original
982  * implicit back refs are carried over to the new block.
983  *
984  * The reference count of the block is greater than one and the tree is
985  * not the block's owner tree. Add implicit back refs for every pointer in
986  * the new block, increase lower level extents' reference count.
987  *
988  * Back Reference Key composing:
989  *
990  * The key objectid corresponds to the first byte in the extent,
991  * The key type is used to differentiate between types of back refs.
992  * There are different meanings of the key offset for different types
993  * of back refs.
994  *
995  * File extents can be referenced by:
996  *
997  * - multiple snapshots, subvolumes, or different generations in one subvol
998  * - different files inside a single subvolume
999  * - different offsets inside a file (bookend extents in file.c)
1000  *
1001  * The extent ref structure for the implicit back refs has fields for:
1002  *
1003  * - Objectid of the subvolume root
1004  * - objectid of the file holding the reference
1005  * - original offset in the file
1006  * - how many bookend extents
1007  *
1008  * The key offset for the implicit back refs is hash of the first
1009  * three fields.
1010  *
1011  * The extent ref structure for the full back refs has a field for:
1012  *
1013  * - number of pointers in the tree leaf
1014  *
1015  * The key offset for the full back refs is the first byte of
1016  * the tree leaf.
1017  *
1018  * When a file extent is allocated, the implicit back refs is used.
1019  * The fields are filled in:
1020  *
1021  *     (root_key.objectid, inode objectid, offset in file, 1)
1022  *
1023  * When a file extent is removed by file truncation, we find the
1024  * corresponding implicit back refs and check the following fields:
1025  *
1026  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1027  *
1028  * Btree extents can be referenced by:
1029  *
1030  * - Different subvolumes
1031  *
1032  * Both the implicit back refs and the full back refs for tree blocks
1033  * only consist of key. The key offset for the implicit back refs is
1034  * objectid of block's owner tree. The key offset for the full back refs
1035  * is the first byte of parent block.
1036  *
1037  * When implicit back refs is used, information about the lowest key and
1038  * level of the tree block is required. This information is stored in the
1039  * tree block info structure.
1040  */
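
/*
 * Key layout illustration (editor's sketch), pulling the rules above
 * together.  For a data extent starting at byte B:
 *
 *	implicit ref held by tree root R, inode I, file offset O:
 *		(B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(R, I, O))
 *	full/shared ref held by the tree leaf at byte P:
 *		(B, BTRFS_SHARED_DATA_REF_KEY, P)
 *
 * For a tree block at byte B owned by tree R, with parent block at byte P:
 *
 *	implicit: (B, BTRFS_TREE_BLOCK_REF_KEY, R)
 *	full:     (B, BTRFS_SHARED_BLOCK_REF_KEY, P)
 *
 * These match the keys built by the lookup/insert helpers below.
 */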
1041
1042 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1043 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1044                                   struct btrfs_root *root,
1045                                   struct btrfs_path *path,
1046                                   u64 owner, u32 extra_size)
1047 {
1048         struct btrfs_extent_item *item;
1049         struct btrfs_extent_item_v0 *ei0;
1050         struct btrfs_extent_ref_v0 *ref0;
1051         struct btrfs_tree_block_info *bi;
1052         struct extent_buffer *leaf;
1053         struct btrfs_key key;
1054         struct btrfs_key found_key;
1055         u32 new_size = sizeof(*item);
1056         u64 refs;
1057         int ret;
1058
1059         leaf = path->nodes[0];
1060         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1061
1062         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1063         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1064                              struct btrfs_extent_item_v0);
1065         refs = btrfs_extent_refs_v0(leaf, ei0);
1066
1067         if (owner == (u64)-1) {
1068                 while (1) {
1069                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1070                                 ret = btrfs_next_leaf(root, path);
1071                                 if (ret < 0)
1072                                         return ret;
1073                                 BUG_ON(ret > 0); /* Corruption */
1074                                 leaf = path->nodes[0];
1075                         }
1076                         btrfs_item_key_to_cpu(leaf, &found_key,
1077                                               path->slots[0]);
1078                         BUG_ON(key.objectid != found_key.objectid);
1079                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1080                                 path->slots[0]++;
1081                                 continue;
1082                         }
1083                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1084                                               struct btrfs_extent_ref_v0);
1085                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1086                         break;
1087                 }
1088         }
1089         btrfs_release_path(path);
1090
1091         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1092                 new_size += sizeof(*bi);
1093
1094         new_size -= sizeof(*ei0);
1095         ret = btrfs_search_slot(trans, root, &key, path,
1096                                 new_size + extra_size, 1);
1097         if (ret < 0)
1098                 return ret;
1099         BUG_ON(ret); /* Corruption */
1100
1101         btrfs_extend_item(root, path, new_size);
1102
1103         leaf = path->nodes[0];
1104         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1105         btrfs_set_extent_refs(leaf, item, refs);
1106         /* FIXME: get real generation */
1107         btrfs_set_extent_generation(leaf, item, 0);
1108         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1109                 btrfs_set_extent_flags(leaf, item,
1110                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1111                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1112                 bi = (struct btrfs_tree_block_info *)(item + 1);
1113                 /* FIXME: get first key of the block */
1114                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1115                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1116         } else {
1117                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1118         }
1119         btrfs_mark_buffer_dirty(leaf);
1120         return 0;
1121 }
1122 #endif
1123
1124 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1125 {
1126         u32 high_crc = ~(u32)0;
1127         u32 low_crc = ~(u32)0;
1128         __le64 lenum;
1129
1130         lenum = cpu_to_le64(root_objectid);
1131         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1132         lenum = cpu_to_le64(owner);
1133         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1134         lenum = cpu_to_le64(offset);
1135         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1136
1137         return ((u64)high_crc << 31) ^ (u64)low_crc;
1138 }
1139
1140 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1141                                      struct btrfs_extent_data_ref *ref)
1142 {
1143         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1144                                     btrfs_extent_data_ref_objectid(leaf, ref),
1145                                     btrfs_extent_data_ref_offset(leaf, ref));
1146 }
1147
1148 static int match_extent_data_ref(struct extent_buffer *leaf,
1149                                  struct btrfs_extent_data_ref *ref,
1150                                  u64 root_objectid, u64 owner, u64 offset)
1151 {
1152         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1153             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1154             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1155                 return 0;
1156         return 1;
1157 }
1158
1159 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1160                                            struct btrfs_root *root,
1161                                            struct btrfs_path *path,
1162                                            u64 bytenr, u64 parent,
1163                                            u64 root_objectid,
1164                                            u64 owner, u64 offset)
1165 {
1166         struct btrfs_key key;
1167         struct btrfs_extent_data_ref *ref;
1168         struct extent_buffer *leaf;
1169         u32 nritems;
1170         int ret;
1171         int recow;
1172         int err = -ENOENT;
1173
1174         key.objectid = bytenr;
1175         if (parent) {
1176                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1177                 key.offset = parent;
1178         } else {
1179                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1180                 key.offset = hash_extent_data_ref(root_objectid,
1181                                                   owner, offset);
1182         }
1183 again:
1184         recow = 0;
1185         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1186         if (ret < 0) {
1187                 err = ret;
1188                 goto fail;
1189         }
1190
1191         if (parent) {
1192                 if (!ret)
1193                         return 0;
1194 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1195                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1196                 btrfs_release_path(path);
1197                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1198                 if (ret < 0) {
1199                         err = ret;
1200                         goto fail;
1201                 }
1202                 if (!ret)
1203                         return 0;
1204 #endif
1205                 goto fail;
1206         }
1207
1208         leaf = path->nodes[0];
1209         nritems = btrfs_header_nritems(leaf);
1210         while (1) {
1211                 if (path->slots[0] >= nritems) {
1212                         ret = btrfs_next_leaf(root, path);
1213                         if (ret < 0)
1214                                 err = ret;
1215                         if (ret)
1216                                 goto fail;
1217
1218                         leaf = path->nodes[0];
1219                         nritems = btrfs_header_nritems(leaf);
1220                         recow = 1;
1221                 }
1222
1223                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1224                 if (key.objectid != bytenr ||
1225                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1226                         goto fail;
1227
1228                 ref = btrfs_item_ptr(leaf, path->slots[0],
1229                                      struct btrfs_extent_data_ref);
1230
1231                 if (match_extent_data_ref(leaf, ref, root_objectid,
1232                                           owner, offset)) {
1233                         if (recow) {
1234                                 btrfs_release_path(path);
1235                                 goto again;
1236                         }
1237                         err = 0;
1238                         break;
1239                 }
1240                 path->slots[0]++;
1241         }
1242 fail:
1243         return err;
1244 }
1245
1246 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1247                                            struct btrfs_root *root,
1248                                            struct btrfs_path *path,
1249                                            u64 bytenr, u64 parent,
1250                                            u64 root_objectid, u64 owner,
1251                                            u64 offset, int refs_to_add)
1252 {
1253         struct btrfs_key key;
1254         struct extent_buffer *leaf;
1255         u32 size;
1256         u32 num_refs;
1257         int ret;
1258
1259         key.objectid = bytenr;
1260         if (parent) {
1261                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1262                 key.offset = parent;
1263                 size = sizeof(struct btrfs_shared_data_ref);
1264         } else {
1265                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1266                 key.offset = hash_extent_data_ref(root_objectid,
1267                                                   owner, offset);
1268                 size = sizeof(struct btrfs_extent_data_ref);
1269         }
1270
1271         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1272         if (ret && ret != -EEXIST)
1273                 goto fail;
1274
1275         leaf = path->nodes[0];
1276         if (parent) {
1277                 struct btrfs_shared_data_ref *ref;
1278                 ref = btrfs_item_ptr(leaf, path->slots[0],
1279                                      struct btrfs_shared_data_ref);
1280                 if (ret == 0) {
1281                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1282                 } else {
1283                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1284                         num_refs += refs_to_add;
1285                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1286                 }
1287         } else {
1288                 struct btrfs_extent_data_ref *ref;
1289                 while (ret == -EEXIST) {
1290                         ref = btrfs_item_ptr(leaf, path->slots[0],
1291                                              struct btrfs_extent_data_ref);
1292                         if (match_extent_data_ref(leaf, ref, root_objectid,
1293                                                   owner, offset))
1294                                 break;
1295                         btrfs_release_path(path);
1296                         key.offset++;
1297                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1298                                                       size);
1299                         if (ret && ret != -EEXIST)
1300                                 goto fail;
1301
1302                         leaf = path->nodes[0];
1303                 }
1304                 ref = btrfs_item_ptr(leaf, path->slots[0],
1305                                      struct btrfs_extent_data_ref);
1306                 if (ret == 0) {
1307                         btrfs_set_extent_data_ref_root(leaf, ref,
1308                                                        root_objectid);
1309                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1310                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1311                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1312                 } else {
1313                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1314                         num_refs += refs_to_add;
1315                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1316                 }
1317         }
1318         btrfs_mark_buffer_dirty(leaf);
1319         ret = 0;
1320 fail:
1321         btrfs_release_path(path);
1322         return ret;
1323 }
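
/*
 * Note (editor's illustration): the hash used as the key offset can collide
 * for different (root, objectid, offset) triples.  insert_extent_data_ref()
 * handles this by bumping key.offset until it finds either the matching item
 * (via match_extent_data_ref()) or a free slot, and lookup_extent_data_ref()
 * copes by walking forward through items with the same objectid and type.
 */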
1324
1325 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1326                                            struct btrfs_root *root,
1327                                            struct btrfs_path *path,
1328                                            int refs_to_drop, int *last_ref)
1329 {
1330         struct btrfs_key key;
1331         struct btrfs_extent_data_ref *ref1 = NULL;
1332         struct btrfs_shared_data_ref *ref2 = NULL;
1333         struct extent_buffer *leaf;
1334         u32 num_refs = 0;
1335         int ret = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339
1340         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1341                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1342                                       struct btrfs_extent_data_ref);
1343                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1345                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1346                                       struct btrfs_shared_data_ref);
1347                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1349         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1350                 struct btrfs_extent_ref_v0 *ref0;
1351                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_extent_ref_v0);
1353                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1354 #endif
1355         } else {
1356                 BUG();
1357         }
1358
1359         BUG_ON(num_refs < refs_to_drop);
1360         num_refs -= refs_to_drop;
1361
1362         if (num_refs == 0) {
1363                 ret = btrfs_del_item(trans, root, path);
1364                 *last_ref = 1;
1365         } else {
1366                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1367                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1368                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1369                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1370 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1371                 else {
1372                         struct btrfs_extent_ref_v0 *ref0;
1373                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1374                                         struct btrfs_extent_ref_v0);
1375                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1376                 }
1377 #endif
1378                 btrfs_mark_buffer_dirty(leaf);
1379         }
1380         return ret;
1381 }
1382
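/*
 * Return the reference count stored in the data ref that @path points at,
 * using the inline ref if @iref is given and the keyed item otherwise.
 * Unexpected item types only trigger a WARN_ON and return 0.
 */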
1383 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1384                                           struct btrfs_extent_inline_ref *iref)
1385 {
1386         struct btrfs_key key;
1387         struct extent_buffer *leaf;
1388         struct btrfs_extent_data_ref *ref1;
1389         struct btrfs_shared_data_ref *ref2;
1390         u32 num_refs = 0;
1391
1392         leaf = path->nodes[0];
1393         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1394         if (iref) {
1395                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1396                     BTRFS_EXTENT_DATA_REF_KEY) {
1397                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1398                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1399                 } else {
1400                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1401                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1402                 }
1403         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1404                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1405                                       struct btrfs_extent_data_ref);
1406                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1407         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1408                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1409                                       struct btrfs_shared_data_ref);
1410                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1411 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1412         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1413                 struct btrfs_extent_ref_v0 *ref0;
1414                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1415                                       struct btrfs_extent_ref_v0);
1416                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1417 #endif
1418         } else {
1419                 WARN_ON(1);
1420         }
1421         return num_refs;
1422 }
1423
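/*
 * Tree block back refs carry no count, so looking one up is just a keyed
 * search: SHARED_BLOCK_REF keyed by @parent for shared blocks, otherwise
 * TREE_BLOCK_REF keyed by @root_objectid (with an EXTENT_REF_V0 retry under
 * the compat ifdef).  -ENOENT means no such ref item exists.
 */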
1424 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1425                                           struct btrfs_root *root,
1426                                           struct btrfs_path *path,
1427                                           u64 bytenr, u64 parent,
1428                                           u64 root_objectid)
1429 {
1430         struct btrfs_key key;
1431         int ret;
1432
1433         key.objectid = bytenr;
1434         if (parent) {
1435                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1436                 key.offset = parent;
1437         } else {
1438                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1439                 key.offset = root_objectid;
1440         }
1441
1442         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1443         if (ret > 0)
1444                 ret = -ENOENT;
1445 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1446         if (ret == -ENOENT && parent) {
1447                 btrfs_release_path(path);
1448                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1449                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1450                 if (ret > 0)
1451                         ret = -ENOENT;
1452         }
1453 #endif
1454         return ret;
1455 }
1456
1457 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1458                                           struct btrfs_root *root,
1459                                           struct btrfs_path *path,
1460                                           u64 bytenr, u64 parent,
1461                                           u64 root_objectid)
1462 {
1463         struct btrfs_key key;
1464         int ret;
1465
1466         key.objectid = bytenr;
1467         if (parent) {
1468                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1469                 key.offset = parent;
1470         } else {
1471                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1472                 key.offset = root_objectid;
1473         }
1474
1475         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1476         btrfs_release_path(path);
1477         return ret;
1478 }
1479
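/*
 * Map (parent, owner) to the back ref key type used below:
 *
 *   metadata (owner < BTRFS_FIRST_FREE_OBJECTID), shared:  SHARED_BLOCK_REF
 *   metadata, not shared:                                  TREE_BLOCK_REF
 *   data, shared:                                          SHARED_DATA_REF
 *   data, not shared:                                      EXTENT_DATA_REF
 */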
1480 static inline int extent_ref_type(u64 parent, u64 owner)
1481 {
1482         int type;
1483         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1484                 if (parent > 0)
1485                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1486                 else
1487                         type = BTRFS_TREE_BLOCK_REF_KEY;
1488         } else {
1489                 if (parent > 0)
1490                         type = BTRFS_SHARED_DATA_REF_KEY;
1491                 else
1492                         type = BTRFS_EXTENT_DATA_REF_KEY;
1493         }
1494         return type;
1495 }
1496
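/*
 * Walk up the path and return (in @key) the first key that follows the
 * current slot at or above @level.  Returns 1 if the path is already at
 * the very end of the tree.
 */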
1497 static int find_next_key(struct btrfs_path *path, int level,
1498                          struct btrfs_key *key)
1499
1500 {
1501         for (; level < BTRFS_MAX_LEVEL; level++) {
1502                 if (!path->nodes[level])
1503                         break;
1504                 if (path->slots[level] + 1 >=
1505                     btrfs_header_nritems(path->nodes[level]))
1506                         continue;
1507                 if (level == 0)
1508                         btrfs_item_key_to_cpu(path->nodes[level], key,
1509                                               path->slots[level] + 1);
1510                 else
1511                         btrfs_node_key_to_cpu(path->nodes[level], key,
1512                                               path->slots[level] + 1);
1513                 return 0;
1514         }
1515         return 1;
1516 }
1517
1518 /*
1519  * look for inline back ref. if back ref is found, *ref_ret is set
1520  * to the address of inline back ref, and 0 is returned.
1521  *
1522  * if back ref isn't found, *ref_ret is set to the address where it
1523  * should be inserted, and -ENOENT is returned.
1524  *
1525  * if insert is true and there are too many inline back refs, the path
1526  * points to the extent item, and -EAGAIN is returned.
1527  *
1528  * NOTE: inline back refs are ordered in the same way that back ref
1529  *       items in the tree are ordered.
1530  */
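/*
 * A typical caller pattern (see insert_inline_extent_backref() and
 * __btrfs_inc_extent_ref() below): on 0, bump the existing inline ref via
 * update_inline_extent_backref(); on -ENOENT, insert a new inline ref at
 * *ref_ret via setup_inline_extent_backref(); on -EAGAIN, fall back to
 * updating the extent item's ref count and adding a keyed back ref item.
 */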
1531 static noinline_for_stack
1532 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1533                                  struct btrfs_root *root,
1534                                  struct btrfs_path *path,
1535                                  struct btrfs_extent_inline_ref **ref_ret,
1536                                  u64 bytenr, u64 num_bytes,
1537                                  u64 parent, u64 root_objectid,
1538                                  u64 owner, u64 offset, int insert)
1539 {
1540         struct btrfs_key key;
1541         struct extent_buffer *leaf;
1542         struct btrfs_extent_item *ei;
1543         struct btrfs_extent_inline_ref *iref;
1544         u64 flags;
1545         u64 item_size;
1546         unsigned long ptr;
1547         unsigned long end;
1548         int extra_size;
1549         int type;
1550         int want;
1551         int ret;
1552         int err = 0;
1553         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1554                                                  SKINNY_METADATA);
1555
1556         key.objectid = bytenr;
1557         key.type = BTRFS_EXTENT_ITEM_KEY;
1558         key.offset = num_bytes;
1559
1560         want = extent_ref_type(parent, owner);
1561         if (insert) {
1562                 extra_size = btrfs_extent_inline_ref_size(want);
1563                 path->keep_locks = 1;
1564         } else
1565                 extra_size = -1;
1566
1567         /*
1568          * Owner is our parent level, so we can just add one to get the level
1569          * for the block we are interested in.
1570          */
1571         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1572                 key.type = BTRFS_METADATA_ITEM_KEY;
1573                 key.offset = owner;
1574         }
1575
1576 again:
1577         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1578         if (ret < 0) {
1579                 err = ret;
1580                 goto out;
1581         }
1582
1583         /*
1584          * We may be a newly converted file system which still has the old fat
1585          * extent entries for metadata, so try and see if we have one of those.
1586          */
1587         if (ret > 0 && skinny_metadata) {
1588                 skinny_metadata = false;
1589                 if (path->slots[0]) {
1590                         path->slots[0]--;
1591                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1592                                               path->slots[0]);
1593                         if (key.objectid == bytenr &&
1594                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1595                             key.offset == num_bytes)
1596                                 ret = 0;
1597                 }
1598                 if (ret) {
1599                         key.objectid = bytenr;
1600                         key.type = BTRFS_EXTENT_ITEM_KEY;
1601                         key.offset = num_bytes;
1602                         btrfs_release_path(path);
1603                         goto again;
1604                 }
1605         }
1606
1607         if (ret && !insert) {
1608                 err = -ENOENT;
1609                 goto out;
1610         } else if (WARN_ON(ret)) {
1611                 err = -EIO;
1612                 goto out;
1613         }
1614
1615         leaf = path->nodes[0];
1616         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1617 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1618         if (item_size < sizeof(*ei)) {
1619                 if (!insert) {
1620                         err = -ENOENT;
1621                         goto out;
1622                 }
1623                 ret = convert_extent_item_v0(trans, root, path, owner,
1624                                              extra_size);
1625                 if (ret < 0) {
1626                         err = ret;
1627                         goto out;
1628                 }
1629                 leaf = path->nodes[0];
1630                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1631         }
1632 #endif
1633         BUG_ON(item_size < sizeof(*ei));
1634
1635         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1636         flags = btrfs_extent_flags(leaf, ei);
1637
1638         ptr = (unsigned long)(ei + 1);
1639         end = (unsigned long)ei + item_size;
1640
1641         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1642                 ptr += sizeof(struct btrfs_tree_block_info);
1643                 BUG_ON(ptr > end);
1644         }
1645
1646         err = -ENOENT;
1647         while (1) {
1648                 if (ptr >= end) {
1649                         WARN_ON(ptr > end);
1650                         break;
1651                 }
1652                 iref = (struct btrfs_extent_inline_ref *)ptr;
1653                 type = btrfs_extent_inline_ref_type(leaf, iref);
1654                 if (want < type)
1655                         break;
1656                 if (want > type) {
1657                         ptr += btrfs_extent_inline_ref_size(type);
1658                         continue;
1659                 }
1660
1661                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1662                         struct btrfs_extent_data_ref *dref;
1663                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1664                         if (match_extent_data_ref(leaf, dref, root_objectid,
1665                                                   owner, offset)) {
1666                                 err = 0;
1667                                 break;
1668                         }
1669                         if (hash_extent_data_ref_item(leaf, dref) <
1670                             hash_extent_data_ref(root_objectid, owner, offset))
1671                                 break;
1672                 } else {
1673                         u64 ref_offset;
1674                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1675                         if (parent > 0) {
1676                                 if (parent == ref_offset) {
1677                                         err = 0;
1678                                         break;
1679                                 }
1680                                 if (ref_offset < parent)
1681                                         break;
1682                         } else {
1683                                 if (root_objectid == ref_offset) {
1684                                         err = 0;
1685                                         break;
1686                                 }
1687                                 if (ref_offset < root_objectid)
1688                                         break;
1689                         }
1690                 }
1691                 ptr += btrfs_extent_inline_ref_size(type);
1692         }
1693         if (err == -ENOENT && insert) {
1694                 if (item_size + extra_size >=
1695                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1696                         err = -EAGAIN;
1697                         goto out;
1698                 }
1699                 /*
1700                  * To add a new inline back ref, we have to make sure
1701                  * there is no corresponding back ref item.
1702                  * For simplicity, we just do not add the new inline back
1703                  * ref if there is any kind of item for this block.
1704                  */
1705                 if (find_next_key(path, 0, &key) == 0 &&
1706                     key.objectid == bytenr &&
1707                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1708                         err = -EAGAIN;
1709                         goto out;
1710                 }
1711         }
1712         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1713 out:
1714         if (insert) {
1715                 path->keep_locks = 0;
1716                 btrfs_unlock_up_safe(path, 1);
1717         }
1718         return err;
1719 }
1720
1721 /*
1722  * helper to add new inline back ref
1723  */
1724 static noinline_for_stack
1725 void setup_inline_extent_backref(struct btrfs_root *root,
1726                                  struct btrfs_path *path,
1727                                  struct btrfs_extent_inline_ref *iref,
1728                                  u64 parent, u64 root_objectid,
1729                                  u64 owner, u64 offset, int refs_to_add,
1730                                  struct btrfs_delayed_extent_op *extent_op)
1731 {
1732         struct extent_buffer *leaf;
1733         struct btrfs_extent_item *ei;
1734         unsigned long ptr;
1735         unsigned long end;
1736         unsigned long item_offset;
1737         u64 refs;
1738         int size;
1739         int type;
1740
1741         leaf = path->nodes[0];
1742         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1743         item_offset = (unsigned long)iref - (unsigned long)ei;
1744
1745         type = extent_ref_type(parent, owner);
1746         size = btrfs_extent_inline_ref_size(type);
1747
1748         btrfs_extend_item(root, path, size);
1749
1750         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1751         refs = btrfs_extent_refs(leaf, ei);
1752         refs += refs_to_add;
1753         btrfs_set_extent_refs(leaf, ei, refs);
1754         if (extent_op)
1755                 __run_delayed_extent_op(extent_op, leaf, ei);
1756
1757         ptr = (unsigned long)ei + item_offset;
1758         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1759         if (ptr < end - size)
1760                 memmove_extent_buffer(leaf, ptr + size, ptr,
1761                                       end - size - ptr);
1762
1763         iref = (struct btrfs_extent_inline_ref *)ptr;
1764         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1765         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1766                 struct btrfs_extent_data_ref *dref;
1767                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1768                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1769                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1770                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1771                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1772         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1773                 struct btrfs_shared_data_ref *sref;
1774                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1775                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1776                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1777         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1778                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1779         } else {
1780                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1781         }
1782         btrfs_mark_buffer_dirty(leaf);
1783 }
1784
1785 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1786                                  struct btrfs_root *root,
1787                                  struct btrfs_path *path,
1788                                  struct btrfs_extent_inline_ref **ref_ret,
1789                                  u64 bytenr, u64 num_bytes, u64 parent,
1790                                  u64 root_objectid, u64 owner, u64 offset)
1791 {
1792         int ret;
1793
1794         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1795                                            bytenr, num_bytes, parent,
1796                                            root_objectid, owner, offset, 0);
1797         if (ret != -ENOENT)
1798                 return ret;
1799
1800         btrfs_release_path(path);
1801         *ref_ret = NULL;
1802
1803         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1804                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1805                                             root_objectid);
1806         } else {
1807                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1808                                              root_objectid, owner, offset);
1809         }
1810         return ret;
1811 }
1812
1813 /*
1814  * helper to update/remove inline back ref
1815  */
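/*
 * Both the extent item's overall ref count and the specific inline ref's
 * count are adjusted by @refs_to_mod.  When the inline ref's count drops to
 * zero, the ref is memmoved out of the item, the item is truncated and
 * *last_ref is set.
 */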
1816 static noinline_for_stack
1817 void update_inline_extent_backref(struct btrfs_root *root,
1818                                   struct btrfs_path *path,
1819                                   struct btrfs_extent_inline_ref *iref,
1820                                   int refs_to_mod,
1821                                   struct btrfs_delayed_extent_op *extent_op,
1822                                   int *last_ref)
1823 {
1824         struct extent_buffer *leaf;
1825         struct btrfs_extent_item *ei;
1826         struct btrfs_extent_data_ref *dref = NULL;
1827         struct btrfs_shared_data_ref *sref = NULL;
1828         unsigned long ptr;
1829         unsigned long end;
1830         u32 item_size;
1831         int size;
1832         int type;
1833         u64 refs;
1834
1835         leaf = path->nodes[0];
1836         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1837         refs = btrfs_extent_refs(leaf, ei);
1838         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1839         refs += refs_to_mod;
1840         btrfs_set_extent_refs(leaf, ei, refs);
1841         if (extent_op)
1842                 __run_delayed_extent_op(extent_op, leaf, ei);
1843
1844         type = btrfs_extent_inline_ref_type(leaf, iref);
1845
1846         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1847                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1848                 refs = btrfs_extent_data_ref_count(leaf, dref);
1849         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1850                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1851                 refs = btrfs_shared_data_ref_count(leaf, sref);
1852         } else {
1853                 refs = 1;
1854                 BUG_ON(refs_to_mod != -1);
1855         }
1856
1857         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1858         refs += refs_to_mod;
1859
1860         if (refs > 0) {
1861                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1862                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1863                 else
1864                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1865         } else {
1866                 *last_ref = 1;
1867                 size =  btrfs_extent_inline_ref_size(type);
1868                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1869                 ptr = (unsigned long)iref;
1870                 end = (unsigned long)ei + item_size;
1871                 if (ptr + size < end)
1872                         memmove_extent_buffer(leaf, ptr, ptr + size,
1873                                               end - ptr - size);
1874                 item_size -= size;
1875                 btrfs_truncate_item(root, path, item_size, 1);
1876         }
1877         btrfs_mark_buffer_dirty(leaf);
1878 }
1879
1880 static noinline_for_stack
1881 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1882                                  struct btrfs_root *root,
1883                                  struct btrfs_path *path,
1884                                  u64 bytenr, u64 num_bytes, u64 parent,
1885                                  u64 root_objectid, u64 owner,
1886                                  u64 offset, int refs_to_add,
1887                                  struct btrfs_delayed_extent_op *extent_op)
1888 {
1889         struct btrfs_extent_inline_ref *iref;
1890         int ret;
1891
1892         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1893                                            bytenr, num_bytes, parent,
1894                                            root_objectid, owner, offset, 1);
1895         if (ret == 0) {
1896                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1897                 update_inline_extent_backref(root, path, iref,
1898                                              refs_to_add, extent_op, NULL);
1899         } else if (ret == -ENOENT) {
1900                 setup_inline_extent_backref(root, path, iref, parent,
1901                                             root_objectid, owner, offset,
1902                                             refs_to_add, extent_op);
1903                 ret = 0;
1904         }
1905         return ret;
1906 }
1907
1908 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1909                                  struct btrfs_root *root,
1910                                  struct btrfs_path *path,
1911                                  u64 bytenr, u64 parent, u64 root_objectid,
1912                                  u64 owner, u64 offset, int refs_to_add)
1913 {
1914         int ret;
1915         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1916                 BUG_ON(refs_to_add != 1);
1917                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1918                                             parent, root_objectid);
1919         } else {
1920                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1921                                              parent, root_objectid,
1922                                              owner, offset, refs_to_add);
1923         }
1924         return ret;
1925 }
1926
1927 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1928                                  struct btrfs_root *root,
1929                                  struct btrfs_path *path,
1930                                  struct btrfs_extent_inline_ref *iref,
1931                                  int refs_to_drop, int is_data, int *last_ref)
1932 {
1933         int ret = 0;
1934
1935         BUG_ON(!is_data && refs_to_drop != 1);
1936         if (iref) {
1937                 update_inline_extent_backref(root, path, iref,
1938                                              -refs_to_drop, NULL, last_ref);
1939         } else if (is_data) {
1940                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1941                                              last_ref);
1942         } else {
1943                 *last_ref = 1;
1944                 ret = btrfs_del_item(trans, root, path);
1945         }
1946         return ret;
1947 }
1948
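/*
 * Issue a discard for [start, start + len), trimmed to 512 byte sectors.
 * Ranges that overlap one of the superblock mirror copies on this device
 * are split around it so the superblocks are never discarded, and the
 * number of bytes actually discarded is returned in *discarded_bytes.
 */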
1949 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1950 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1951                                u64 *discarded_bytes)
1952 {
1953         int j, ret = 0;
1954         u64 bytes_left, end;
1955         u64 aligned_start = ALIGN(start, 1 << 9);
1956
1957         if (WARN_ON(start != aligned_start)) {
1958                 len -= aligned_start - start;
1959                 len = round_down(len, 1 << 9);
1960                 start = aligned_start;
1961         }
1962
1963         *discarded_bytes = 0;
1964
1965         if (!len)
1966                 return 0;
1967
1968         end = start + len;
1969         bytes_left = len;
1970
1971         /* Skip any superblocks on this device. */
1972         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1973                 u64 sb_start = btrfs_sb_offset(j);
1974                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1975                 u64 size = sb_start - start;
1976
1977                 if (!in_range(sb_start, start, bytes_left) &&
1978                     !in_range(sb_end, start, bytes_left) &&
1979                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1980                         continue;
1981
1982                 /*
1983                  * Superblock spans beginning of range.  Adjust start and
1984                  * try again.
1985                  */
1986                 if (sb_start <= start) {
1987                         start += sb_end - start;
1988                         if (start > end) {
1989                                 bytes_left = 0;
1990                                 break;
1991                         }
1992                         bytes_left = end - start;
1993                         continue;
1994                 }
1995
1996                 if (size) {
1997                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1998                                                    GFP_NOFS, 0);
1999                         if (!ret)
2000                                 *discarded_bytes += size;
2001                         else if (ret != -EOPNOTSUPP)
2002                                 return ret;
2003                 }
2004
2005                 start = sb_end;
2006                 if (start > end) {
2007                         bytes_left = 0;
2008                         break;
2009                 }
2010                 bytes_left = end - start;
2011         }
2012
2013         if (bytes_left) {
2014                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2015                                            GFP_NOFS, 0);
2016                 if (!ret)
2017                         *discarded_bytes += bytes_left;
2018         }
2019         return ret;
2020 }
2021
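/*
 * Discard a logical extent: btrfs_map_block() translates [bytenr, num_bytes)
 * into physical stripes and a discard is issued to each stripe whose device
 * supports it.  -EOPNOTSUPP from the block layer is ignored, and
 * *actual_bytes (if non-NULL) reports how much was really discarded.
 */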
2022 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2023                          u64 num_bytes, u64 *actual_bytes)
2024 {
2025         int ret;
2026         u64 discarded_bytes = 0;
2027         struct btrfs_bio *bbio = NULL;
2028
2029
2030         /* Tell the block device(s) that the sectors can be discarded */
2031         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2032                               bytenr, &num_bytes, &bbio, 0);
2033         /* Error condition is -ENOMEM */
2034         if (!ret) {
2035                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2036                 int i;
2037
2038
2039                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2040                         u64 bytes;
2041                         if (!stripe->dev->can_discard)
2042                                 continue;
2043
2044                         ret = btrfs_issue_discard(stripe->dev->bdev,
2045                                                   stripe->physical,
2046                                                   stripe->length,
2047                                                   &bytes);
2048                         if (!ret)
2049                                 discarded_bytes += bytes;
2050                         else if (ret != -EOPNOTSUPP)
2051                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2052
2053                         /*
2054                          * Just in case we get back EOPNOTSUPP for some reason,
2055                          * ignore the return value so we don't screw up
2056                          * people calling discard_extent.
2057                          */
2058                         ret = 0;
2059                 }
2060                 btrfs_put_bbio(bbio);
2061         }
2062
2063         if (actual_bytes)
2064                 *actual_bytes = discarded_bytes;
2065
2066
2067         if (ret == -EOPNOTSUPP)
2068                 ret = 0;
2069         return ret;
2070 }
2071
2072 /* Can return -ENOMEM */
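/*
 * This only queues a delayed ref (a tree ref for metadata owners, i.e.
 * owner < BTRFS_FIRST_FREE_OBJECTID, a data ref otherwise); the extent tree
 * itself is modified later when the delayed refs are run.
 */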
2073 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2074                          struct btrfs_root *root,
2075                          u64 bytenr, u64 num_bytes, u64 parent,
2076                          u64 root_objectid, u64 owner, u64 offset,
2077                          int no_quota)
2078 {
2079         int ret;
2080         struct btrfs_fs_info *fs_info = root->fs_info;
2081
2082         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2083                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2084
2085         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2086                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2087                                         num_bytes,
2088                                         parent, root_objectid, (int)owner,
2089                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2090         } else {
2091                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2092                                         num_bytes,
2093                                         parent, root_objectid, owner, offset,
2094                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2095         }
2096         return ret;
2097 }
2098
2099 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2100                                   struct btrfs_root *root,
2101                                   struct btrfs_delayed_ref_node *node,
2102                                   u64 parent, u64 root_objectid,
2103                                   u64 owner, u64 offset, int refs_to_add,
2104                                   struct btrfs_delayed_extent_op *extent_op)
2105 {
2106         struct btrfs_fs_info *fs_info = root->fs_info;
2107         struct btrfs_path *path;
2108         struct extent_buffer *leaf;
2109         struct btrfs_extent_item *item;
2110         struct btrfs_key key;
2111         u64 bytenr = node->bytenr;
2112         u64 num_bytes = node->num_bytes;
2113         u64 refs;
2114         int ret;
2115         int no_quota = node->no_quota;
2116
2117         path = btrfs_alloc_path();
2118         if (!path)
2119                 return -ENOMEM;
2120
2121         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2122                 no_quota = 1;
2123
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         /* this will set up the path even if it fails to insert the back ref */
2127         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2128                                            bytenr, num_bytes, parent,
2129                                            root_objectid, owner, offset,
2130                                            refs_to_add, extent_op);
2131         if ((ret < 0 && ret != -EAGAIN) || !ret)
2132                 goto out;
2133
2134         /*
2135          * Ok we had -EAGAIN which means we didn't have space to insert an
2136          * inline extent ref, so just update the reference count and add a
2137          * normal backref.
2138          */
2139         leaf = path->nodes[0];
2140         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2141         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2142         refs = btrfs_extent_refs(leaf, item);
2143         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2144         if (extent_op)
2145                 __run_delayed_extent_op(extent_op, leaf, item);
2146
2147         btrfs_mark_buffer_dirty(leaf);
2148         btrfs_release_path(path);
2149
2150         path->reada = 1;
2151         path->leave_spinning = 1;
2152         /* now insert the actual backref */
2153         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2154                                     path, bytenr, parent, root_objectid,
2155                                     owner, offset, refs_to_add);
2156         if (ret)
2157                 btrfs_abort_transaction(trans, root, ret);
2158 out:
2159         btrfs_free_path(path);
2160         return ret;
2161 }
2162
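/*
 * Run one delayed data ref: either insert the newly allocated extent item
 * plus its first backref (ADD with insert_reserved), add one more backref
 * (ADD), or drop a backref and possibly free the extent (DROP).
 */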
2163 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2164                                 struct btrfs_root *root,
2165                                 struct btrfs_delayed_ref_node *node,
2166                                 struct btrfs_delayed_extent_op *extent_op,
2167                                 int insert_reserved)
2168 {
2169         int ret = 0;
2170         struct btrfs_delayed_data_ref *ref;
2171         struct btrfs_key ins;
2172         u64 parent = 0;
2173         u64 ref_root = 0;
2174         u64 flags = 0;
2175
2176         ins.objectid = node->bytenr;
2177         ins.offset = node->num_bytes;
2178         ins.type = BTRFS_EXTENT_ITEM_KEY;
2179
2180         ref = btrfs_delayed_node_to_data_ref(node);
2181         trace_run_delayed_data_ref(node, ref, node->action);
2182
2183         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2184                 parent = ref->parent;
2185         ref_root = ref->root;
2186
2187         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2188                 if (extent_op)
2189                         flags |= extent_op->flags_to_set;
2190                 ret = alloc_reserved_file_extent(trans, root,
2191                                                  parent, ref_root, flags,
2192                                                  ref->objectid, ref->offset,
2193                                                  &ins, node->ref_mod);
2194         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2195                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2196                                              ref_root, ref->objectid,
2197                                              ref->offset, node->ref_mod,
2198                                              extent_op);
2199         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2200                 ret = __btrfs_free_extent(trans, root, node, parent,
2201                                           ref_root, ref->objectid,
2202                                           ref->offset, node->ref_mod,
2203                                           extent_op);
2204         } else {
2205                 BUG();
2206         }
2207         return ret;
2208 }
2209
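/*
 * Apply the flag and/or key updates queued in a delayed extent op directly
 * to the extent item in the leaf.
 */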
2210 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2211                                     struct extent_buffer *leaf,
2212                                     struct btrfs_extent_item *ei)
2213 {
2214         u64 flags = btrfs_extent_flags(leaf, ei);
2215         if (extent_op->update_flags) {
2216                 flags |= extent_op->flags_to_set;
2217                 btrfs_set_extent_flags(leaf, ei, flags);
2218         }
2219
2220         if (extent_op->update_key) {
2221                 struct btrfs_tree_block_info *bi;
2222                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2223                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2224                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2225         }
2226 }
2227
2228 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2229                                  struct btrfs_root *root,
2230                                  struct btrfs_delayed_ref_node *node,
2231                                  struct btrfs_delayed_extent_op *extent_op)
2232 {
2233         struct btrfs_key key;
2234         struct btrfs_path *path;
2235         struct btrfs_extent_item *ei;
2236         struct extent_buffer *leaf;
2237         u32 item_size;
2238         int ret;
2239         int err = 0;
2240         int metadata = !extent_op->is_data;
2241
2242         if (trans->aborted)
2243                 return 0;
2244
2245         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2246                 metadata = 0;
2247
2248         path = btrfs_alloc_path();
2249         if (!path)
2250                 return -ENOMEM;
2251
2252         key.objectid = node->bytenr;
2253
2254         if (metadata) {
2255                 key.type = BTRFS_METADATA_ITEM_KEY;
2256                 key.offset = extent_op->level;
2257         } else {
2258                 key.type = BTRFS_EXTENT_ITEM_KEY;
2259                 key.offset = node->num_bytes;
2260         }
2261
2262 again:
2263         path->reada = 1;
2264         path->leave_spinning = 1;
2265         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2266                                 path, 0, 1);
2267         if (ret < 0) {
2268                 err = ret;
2269                 goto out;
2270         }
2271         if (ret > 0) {
2272                 if (metadata) {
2273                         if (path->slots[0] > 0) {
2274                                 path->slots[0]--;
2275                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2276                                                       path->slots[0]);
2277                                 if (key.objectid == node->bytenr &&
2278                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2279                                     key.offset == node->num_bytes)
2280                                         ret = 0;
2281                         }
2282                         if (ret > 0) {
2283                                 btrfs_release_path(path);
2284                                 metadata = 0;
2285
2286                                 key.objectid = node->bytenr;
2287                                 key.offset = node->num_bytes;
2288                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2289                                 goto again;
2290                         }
2291                 } else {
2292                         err = -EIO;
2293                         goto out;
2294                 }
2295         }
2296
2297         leaf = path->nodes[0];
2298         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2299 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2300         if (item_size < sizeof(*ei)) {
2301                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2302                                              path, (u64)-1, 0);
2303                 if (ret < 0) {
2304                         err = ret;
2305                         goto out;
2306                 }
2307                 leaf = path->nodes[0];
2308                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2309         }
2310 #endif
2311         BUG_ON(item_size < sizeof(*ei));
2312         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2313         __run_delayed_extent_op(extent_op, leaf, ei);
2314
2315         btrfs_mark_buffer_dirty(leaf);
2316 out:
2317         btrfs_free_path(path);
2318         return err;
2319 }
2320
2321 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2322                                 struct btrfs_root *root,
2323                                 struct btrfs_delayed_ref_node *node,
2324                                 struct btrfs_delayed_extent_op *extent_op,
2325                                 int insert_reserved)
2326 {
2327         int ret = 0;
2328         struct btrfs_delayed_tree_ref *ref;
2329         struct btrfs_key ins;
2330         u64 parent = 0;
2331         u64 ref_root = 0;
2332         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2333                                                  SKINNY_METADATA);
2334
2335         ref = btrfs_delayed_node_to_tree_ref(node);
2336         trace_run_delayed_tree_ref(node, ref, node->action);
2337
2338         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2339                 parent = ref->parent;
2340         ref_root = ref->root;
2341
2342         ins.objectid = node->bytenr;
2343         if (skinny_metadata) {
2344                 ins.offset = ref->level;
2345                 ins.type = BTRFS_METADATA_ITEM_KEY;
2346         } else {
2347                 ins.offset = node->num_bytes;
2348                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2349         }
2350
2351         BUG_ON(node->ref_mod != 1);
2352         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2353                 BUG_ON(!extent_op || !extent_op->update_flags);
2354                 ret = alloc_reserved_tree_block(trans, root,
2355                                                 parent, ref_root,
2356                                                 extent_op->flags_to_set,
2357                                                 &extent_op->key,
2358                                                 ref->level, &ins,
2359                                                 node->no_quota);
2360         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2361                 ret = __btrfs_inc_extent_ref(trans, root, node,
2362                                              parent, ref_root,
2363                                              ref->level, 0, 1,
2364                                              extent_op);
2365         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2366                 ret = __btrfs_free_extent(trans, root, node,
2367                                           parent, ref_root,
2368                                           ref->level, 0, 1, extent_op);
2369         } else {
2370                 BUG();
2371         }
2372         return ret;
2373 }
2374
2375 /* helper function to actually process a single delayed ref entry */
2376 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2377                                struct btrfs_root *root,
2378                                struct btrfs_delayed_ref_node *node,
2379                                struct btrfs_delayed_extent_op *extent_op,
2380                                int insert_reserved)
2381 {
2382         int ret = 0;
2383
2384         if (trans->aborted) {
2385                 if (insert_reserved)
2386                         btrfs_pin_extent(root, node->bytenr,
2387                                          node->num_bytes, 1);
2388                 return 0;
2389         }
2390
2391         if (btrfs_delayed_ref_is_head(node)) {
2392                 struct btrfs_delayed_ref_head *head;
2393                 /*
2394                  * we've hit the end of the chain and we were supposed
2395                  * to insert this extent into the tree.  But, it got
2396                  * deleted before we ever needed to insert it, so all
2397                  * we have to do is clean up the accounting
2398                  */
2399                 BUG_ON(extent_op);
2400                 head = btrfs_delayed_node_to_head(node);
2401                 trace_run_delayed_ref_head(node, head, node->action);
2402
2403                 if (insert_reserved) {
2404                         btrfs_pin_extent(root, node->bytenr,
2405                                          node->num_bytes, 1);
2406                         if (head->is_data) {
2407                                 ret = btrfs_del_csums(trans, root,
2408                                                       node->bytenr,
2409                                                       node->num_bytes);
2410                         }
2411                 }
2412                 return ret;
2413         }
2414
2415         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2416             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2417                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2418                                            insert_reserved);
2419         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2420                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2421                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2422                                            insert_reserved);
2423         else
2424                 BUG();
2425         return ret;
2426 }
2427
2428 static inline struct btrfs_delayed_ref_node *
2429 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2430 {
2431         struct btrfs_delayed_ref_node *ref;
2432
2433         if (list_empty(&head->ref_list))
2434                 return NULL;
2435
2436         /*
2437          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2438          * This is to prevent a ref count from going down to zero, which deletes
2439          * the extent item from the extent tree, when there still are references
2440          * to add, which would fail because they would not find the extent item.
2441          */
2442         list_for_each_entry(ref, &head->ref_list, list) {
2443                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2444                         return ref;
2445         }
2446
2447         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2448                           list);
2449 }
2450
2451 /*
2452  * Returns 0 on success or if called with an already aborted transaction.
2453  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2454  */
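/*
 * Rough shape of the loop below: lock the next ref head, then repeatedly
 * pick one of its refs (preferring adds, see select_delayed_ref()) and run
 * it; once the head has no refs left the head node itself is run so any
 * pending extent op and reserved-space accounting are dealt with, and the
 * head is removed from the rbtree.
 */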
2455 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2456                                              struct btrfs_root *root,
2457                                              unsigned long nr)
2458 {
2459         struct btrfs_delayed_ref_root *delayed_refs;
2460         struct btrfs_delayed_ref_node *ref;
2461         struct btrfs_delayed_ref_head *locked_ref = NULL;
2462         struct btrfs_delayed_extent_op *extent_op;
2463         struct btrfs_fs_info *fs_info = root->fs_info;
2464         ktime_t start = ktime_get();
2465         int ret;
2466         unsigned long count = 0;
2467         unsigned long actual_count = 0;
2468         int must_insert_reserved = 0;
2469
2470         delayed_refs = &trans->transaction->delayed_refs;
2471         while (1) {
2472                 if (!locked_ref) {
2473                         if (count >= nr)
2474                                 break;
2475
2476                         spin_lock(&delayed_refs->lock);
2477                         locked_ref = btrfs_select_ref_head(trans);
2478                         if (!locked_ref) {
2479                                 spin_unlock(&delayed_refs->lock);
2480                                 break;
2481                         }
2482
2483                         /* grab the lock that says we are going to process
2484                          * all the refs for this head */
2485                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2486                         spin_unlock(&delayed_refs->lock);
2487                         /*
2488                          * we may have dropped the spin lock to get the head
2489                          * mutex lock, and that might have given someone else
2490                          * time to free the head.  If that's true, it has been
2491                          * removed from our list and we can move on.
2492                          */
2493                         if (ret == -EAGAIN) {
2494                                 locked_ref = NULL;
2495                                 count++;
2496                                 continue;
2497                         }
2498                 }
2499
2500                 spin_lock(&locked_ref->lock);
2501
2502                 /*
2503                  * locked_ref is the head node, so we have to go one
2504                  * node back for any delayed ref updates
2505                  */
2506                 ref = select_delayed_ref(locked_ref);
2507
2508                 if (ref && ref->seq &&
2509                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2510                         spin_unlock(&locked_ref->lock);
2511                         btrfs_delayed_ref_unlock(locked_ref);
2512                         spin_lock(&delayed_refs->lock);
2513                         locked_ref->processing = 0;
2514                         delayed_refs->num_heads_ready++;
2515                         spin_unlock(&delayed_refs->lock);
2516                         locked_ref = NULL;
2517                         cond_resched();
2518                         count++;
2519                         continue;
2520                 }
2521
2522                 /*
2523                  * record the must insert reserved flag before we
2524                  * drop the spin lock.
2525                  */
2526                 must_insert_reserved = locked_ref->must_insert_reserved;
2527                 locked_ref->must_insert_reserved = 0;
2528
2529                 extent_op = locked_ref->extent_op;
2530                 locked_ref->extent_op = NULL;
2531
2532                 if (!ref) {
2533
2534
2535                         /* All delayed refs have been processed, go ahead
2536                          * and send the head node to run_one_delayed_ref,
2537                          * so that any accounting fixes can happen
2538                          */
2539                         ref = &locked_ref->node;
2540
2541                         if (extent_op && must_insert_reserved) {
2542                                 btrfs_free_delayed_extent_op(extent_op);
2543                                 extent_op = NULL;
2544                         }
2545
2546                         if (extent_op) {
2547                                 spin_unlock(&locked_ref->lock);
2548                                 ret = run_delayed_extent_op(trans, root,
2549                                                             ref, extent_op);
2550                                 btrfs_free_delayed_extent_op(extent_op);
2551
2552                                 if (ret) {
2553                                         /*
2554                                          * Need to reset must_insert_reserved if
2555                                          * there was an error so the abort stuff
2556                                          * can clean up the reserved space
2557                                          * properly.
2558                                          */
2559                                         if (must_insert_reserved)
2560                                                 locked_ref->must_insert_reserved = 1;
2561                                         locked_ref->processing = 0;
2562                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2563                                         btrfs_delayed_ref_unlock(locked_ref);
2564                                         return ret;
2565                                 }
2566                                 continue;
2567                         }
2568
2569                         /*
2570                          * Need to drop our head ref lock and re-acquire the
2571                          * delayed ref lock and then re-check to make sure
2572                          * nobody got added.
2573                          */
2574                         spin_unlock(&locked_ref->lock);
2575                         spin_lock(&delayed_refs->lock);
2576                         spin_lock(&locked_ref->lock);
2577                         if (!list_empty(&locked_ref->ref_list) ||
2578                             locked_ref->extent_op) {
2579                                 spin_unlock(&locked_ref->lock);
2580                                 spin_unlock(&delayed_refs->lock);
2581                                 continue;
2582                         }
2583                         ref->in_tree = 0;
2584                         delayed_refs->num_heads--;
2585                         rb_erase(&locked_ref->href_node,
2586                                  &delayed_refs->href_root);
2587                         spin_unlock(&delayed_refs->lock);
2588                 } else {
2589                         actual_count++;
2590                         ref->in_tree = 0;
2591                         list_del(&ref->list);
2592                 }
2593                 atomic_dec(&delayed_refs->num_entries);
2594
2595                 if (!btrfs_delayed_ref_is_head(ref)) {
2596                         /*
2597                          * when we play the delayed ref, also correct the
2598                          * ref_mod on head
2599                          */
2600                         switch (ref->action) {
2601                         case BTRFS_ADD_DELAYED_REF:
2602                         case BTRFS_ADD_DELAYED_EXTENT:
2603                                 locked_ref->node.ref_mod -= ref->ref_mod;
2604                                 break;
2605                         case BTRFS_DROP_DELAYED_REF:
2606                                 locked_ref->node.ref_mod += ref->ref_mod;
2607                                 break;
2608                         default:
2609                                 WARN_ON(1);
2610                         }
2611                 }
2612                 spin_unlock(&locked_ref->lock);
2613
2614                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2615                                           must_insert_reserved);
2616
2617                 btrfs_free_delayed_extent_op(extent_op);
2618                 if (ret) {
2619                         locked_ref->processing = 0;
2620                         btrfs_delayed_ref_unlock(locked_ref);
2621                         btrfs_put_delayed_ref(ref);
2622                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2623                         return ret;
2624                 }
2625
2626                 /*
2627                  * If this node is a head, that means all the refs in this head
2628                  * have been dealt with, and we will pick the next head to deal
2629                  * with, so we must unlock the head and drop it from the cluster
2630                  * list before we release it.
2631                  */
2632                 if (btrfs_delayed_ref_is_head(ref)) {
2633                         if (locked_ref->is_data &&
2634                             locked_ref->total_ref_mod < 0) {
2635                                 spin_lock(&delayed_refs->lock);
2636                                 delayed_refs->pending_csums -= ref->num_bytes;
2637                                 spin_unlock(&delayed_refs->lock);
2638                         }
2639                         btrfs_delayed_ref_unlock(locked_ref);
2640                         locked_ref = NULL;
2641                 }
2642                 btrfs_put_delayed_ref(ref);
2643                 count++;
2644                 cond_resched();
2645         }
2646
2647         /*
2648          * We don't want to include ref heads since we can have empty ref heads,
2649          * and those would drastically skew our runtime downwards because they
2650          * only involve accounting, not actual extent tree updates.
2651          */
2652         if (actual_count > 0) {
2653                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2654                 u64 avg;
2655
2656                 /*
2657                  * We weigh the current average higher than our current runtime
2658                  * to avoid large swings in the average.
2659                  */
2660                 spin_lock(&delayed_refs->lock);
2661                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2662                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2663                 spin_unlock(&delayed_refs->lock);
2664         }
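        /*
         * Worked example of the weighting above (illustrative numbers, not
         * measurements): with a stored average of 100000ns and a new runtime
         * of 20000ns we get (100000 * 3 + 20000) >> 2 = 80000ns, so a single
         * unusually fast or slow batch only moves the average by a quarter
         * of the difference.
         */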
2665         return 0;
2666 }
2667
2668 #ifdef SCRAMBLE_DELAYED_REFS
2669 /*
2670  * Normally delayed refs get processed in ascending bytenr order. This
2671  * correlates in most cases to the order added. To expose dependencies on this
2672  * order, we start to process the tree in the middle instead of the beginning
2673  */
2674 static u64 find_middle(struct rb_root *root)
2675 {
2676         struct rb_node *n = root->rb_node;
2677         struct btrfs_delayed_ref_node *entry;
2678         int alt = 1;
2679         u64 middle;
2680         u64 first = 0, last = 0;
2681
2682         n = rb_first(root);
2683         if (n) {
2684                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2685                 first = entry->bytenr;
2686         }
2687         n = rb_last(root);
2688         if (n) {
2689                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2690                 last = entry->bytenr;
2691         }
2692         n = root->rb_node;
2693
2694         while (n) {
2695                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2696                 WARN_ON(!entry->in_tree);
2697
2698                 middle = entry->bytenr;
2699
2700                 if (alt)
2701                         n = n->rb_left;
2702                 else
2703                         n = n->rb_right;
2704
2705                 alt = 1 - alt;
2706         }
2707         return middle;
2708 }
2709 #endif
2710
2711 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2712 {
2713         u64 num_bytes;
2714
2715         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2716                              sizeof(struct btrfs_extent_inline_ref));
2717         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2718                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2719
2720         /*
2721          * We don't ever fill up leaves all the way so multiply by 2 just to be
2722                  * closer to what we're really going to want to use.
2723          */
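        /*
         * Rough illustration (struct sizes are approximate and assume the
         * skinny metadata layout): with sizeof(struct btrfs_extent_item) at
         * about 24 bytes and sizeof(struct btrfs_extent_inline_ref) at about
         * 9 bytes, each head costs roughly 33 bytes, so a 16K node with ~16K
         * of usable leaf data space covers several hundred heads per leaf
         * before the divide below rounds down.
         */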
2724         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2725 }
2726
2727 /*
2728  * Takes the number of bytes to be checksummed and figures out how many leaves it
2729  * would require to store the csums for that many bytes.
2730  */
2731 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2732 {
2733         u64 csum_size;
2734         u64 num_csums_per_leaf;
2735         u64 num_csums;
2736
2737         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2738         num_csums_per_leaf = div64_u64(csum_size,
2739                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2740         num_csums = div64_u64(csum_bytes, root->sectorsize);
2741         num_csums += num_csums_per_leaf - 1;
2742         num_csums = div64_u64(num_csums, num_csums_per_leaf);
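        /*
         * Illustrative example (assuming a 4K sectorsize, 4 byte crc32c
         * csums and roughly 16K of usable leaf space): 1MiB of data needs
         * 256 csums, a single leaf holds about 4000 of them, so the
         * round-up division above yields 1 leaf.
         */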
2743         return num_csums;
2744 }
2745
2746 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2747                                        struct btrfs_root *root)
2748 {
2749         struct btrfs_block_rsv *global_rsv;
2750         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2751         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2752         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2753         u64 num_bytes, num_dirty_bgs_bytes;
2754         int ret = 0;
2755
2756         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2757         num_heads = heads_to_leaves(root, num_heads);
2758         if (num_heads > 1)
2759                 num_bytes += (num_heads - 1) * root->nodesize;
2760         num_bytes <<= 1;
2761         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2762         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2763                                                              num_dirty_bgs);
2764         global_rsv = &root->fs_info->global_block_rsv;
2765
2766         /*
2767          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2768          * wiggle room since running delayed refs can create more delayed refs.
2769          */
2770         if (global_rsv->space_info->full) {
2771                 num_dirty_bgs_bytes <<= 1;
2772                 num_bytes <<= 1;
2773         }
2774
2775         spin_lock(&global_rsv->lock);
2776         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2777                 ret = 1;
2778         spin_unlock(&global_rsv->lock);
2779         return ret;
2780 }
2781
2782 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2783                                        struct btrfs_root *root)
2784 {
2785         struct btrfs_fs_info *fs_info = root->fs_info;
2786         u64 num_entries =
2787                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2788         u64 avg_runtime;
2789         u64 val;
2790
2791         smp_mb();
2792         avg_runtime = fs_info->avg_delayed_ref_runtime;
2793         val = num_entries * avg_runtime;
2794         if (val >= NSEC_PER_SEC)
2795                 return 1;
2796         if (val >= NSEC_PER_SEC / 2)
2797                 return 2;
2798
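        /*
         * For scale (hypothetical numbers): with an average of 100us
         * (100000ns) per ref, roughly 10000 queued entries push the estimate
         * past NSEC_PER_SEC and we return 1, while roughly 5000 entries
         * cross the half second mark and we return 2.
         */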
2799         return btrfs_check_space_for_delayed_refs(trans, root);
2800 }
2801
2802 struct async_delayed_refs {
2803         struct btrfs_root *root;
2804         int count;
2805         int error;
2806         int sync;
2807         struct completion wait;
2808         struct btrfs_work work;
2809 };
2810
2811 static void delayed_ref_async_start(struct btrfs_work *work)
2812 {
2813         struct async_delayed_refs *async;
2814         struct btrfs_trans_handle *trans;
2815         int ret;
2816
2817         async = container_of(work, struct async_delayed_refs, work);
2818
2819         trans = btrfs_join_transaction(async->root);
2820         if (IS_ERR(trans)) {
2821                 async->error = PTR_ERR(trans);
2822                 goto done;
2823         }
2824
2825         /*
2826          * trans->sync means that when we call end_transaction, we won't
2827          * wait on delayed refs
2828          */
2829         trans->sync = true;
2830         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2831         if (ret)
2832                 async->error = ret;
2833
2834         ret = btrfs_end_transaction(trans, async->root);
2835         if (ret && !async->error)
2836                 async->error = ret;
2837 done:
2838         if (async->sync)
2839                 complete(&async->wait);
2840         else
2841                 kfree(async);
2842 }
2843
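/*
 * Queue delayed ref processing on the extent_workers workqueue.  A sketch of
 * how this might be called (hypothetical call sites, not taken from this
 * file):
 *
 *	btrfs_async_run_delayed_refs(root, 64, 0);       fire and forget
 *	err = btrfs_async_run_delayed_refs(root, 0, 1);  block until finished
 *
 * With wait == 0 the worker frees the async struct itself and any error from
 * running the refs is dropped; with wait != 0 we block on the completion and
 * return the worker's error code.
 */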
2844 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2845                                  unsigned long count, int wait)
2846 {
2847         struct async_delayed_refs *async;
2848         int ret;
2849
2850         async = kmalloc(sizeof(*async), GFP_NOFS);
2851         if (!async)
2852                 return -ENOMEM;
2853
2854         async->root = root->fs_info->tree_root;
2855         async->count = count;
2856         async->error = 0;
2857         if (wait)
2858                 async->sync = 1;
2859         else
2860                 async->sync = 0;
2861         init_completion(&async->wait);
2862
2863         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2864                         delayed_ref_async_start, NULL, NULL);
2865
2866         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2867
2868         if (wait) {
2869                 wait_for_completion(&async->wait);
2870                 ret = async->error;
2871                 kfree(async);
2872                 return ret;
2873         }
2874         return 0;
2875 }
2876
2877 /*
2878  * this starts processing the delayed reference count updates and
2879  * extent insertions we have queued up so far.  count can be
2880  * 0, which means to process everything in the tree at the start
2881  * of the run (but not newly added entries), or it can be some target
2882  * number you'd like to process.
2883  *
2884  * Returns 0 on success or if called with an aborted transaction
2885  * Returns <0 on error and aborts the transaction
2886  */
2887 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2888                            struct btrfs_root *root, unsigned long count)
2889 {
2890         struct rb_node *node;
2891         struct btrfs_delayed_ref_root *delayed_refs;
2892         struct btrfs_delayed_ref_head *head;
2893         int ret;
2894         int run_all = count == (unsigned long)-1;
2895         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2896
2897         /* We'll clean this up in btrfs_cleanup_transaction */
2898         if (trans->aborted)
2899                 return 0;
2900
2901         if (root == root->fs_info->extent_root)
2902                 root = root->fs_info->tree_root;
2903
2904         delayed_refs = &trans->transaction->delayed_refs;
2905         if (count == 0)
2906                 count = atomic_read(&delayed_refs->num_entries) * 2;
2907
2908 again:
2909 #ifdef SCRAMBLE_DELAYED_REFS
2910         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2911 #endif
2912         trans->can_flush_pending_bgs = false;
2913         ret = __btrfs_run_delayed_refs(trans, root, count);
2914         if (ret < 0) {
2915                 btrfs_abort_transaction(trans, root, ret);
2916                 return ret;
2917         }
2918
2919         if (run_all) {
2920                 if (!list_empty(&trans->new_bgs))
2921                         btrfs_create_pending_block_groups(trans, root);
2922
2923                 spin_lock(&delayed_refs->lock);
2924                 node = rb_first(&delayed_refs->href_root);
2925                 if (!node) {
2926                         spin_unlock(&delayed_refs->lock);
2927                         goto out;
2928                 }
2929                 count = (unsigned long)-1;
2930
2931                 while (node) {
2932                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2933                                         href_node);
2934                         if (btrfs_delayed_ref_is_head(&head->node)) {
2935                                 struct btrfs_delayed_ref_node *ref;
2936
2937                                 ref = &head->node;
2938                                 atomic_inc(&ref->refs);
2939
2940                                 spin_unlock(&delayed_refs->lock);
2941                                 /*
2942                                  * Mutex was contended, block until it's
2943                                  * released and try again
2944                                  */
2945                                 mutex_lock(&head->mutex);
2946                                 mutex_unlock(&head->mutex);
2947
2948                                 btrfs_put_delayed_ref(ref);
2949                                 cond_resched();
2950                                 goto again;
2951                         } else {
2952                                 WARN_ON(1);
2953                         }
2954                         node = rb_next(node);
2955                 }
2956                 spin_unlock(&delayed_refs->lock);
2957                 cond_resched();
2958                 goto again;
2959         }
2960 out:
2961         assert_qgroups_uptodate(trans);
2962         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2963         return 0;
2964 }
2965
2966 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2967                                 struct btrfs_root *root,
2968                                 u64 bytenr, u64 num_bytes, u64 flags,
2969                                 int level, int is_data)
2970 {
2971         struct btrfs_delayed_extent_op *extent_op;
2972         int ret;
2973
2974         extent_op = btrfs_alloc_delayed_extent_op();
2975         if (!extent_op)
2976                 return -ENOMEM;
2977
2978         extent_op->flags_to_set = flags;
2979         extent_op->update_flags = 1;
2980         extent_op->update_key = 0;
2981         extent_op->is_data = is_data ? 1 : 0;
2982         extent_op->level = level;
2983
2984         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2985                                           num_bytes, extent_op);
2986         if (ret)
2987                 btrfs_free_delayed_extent_op(extent_op);
2988         return ret;
2989 }
2990
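/*
 * Look through the delayed refs queued against @bytenr for evidence that
 * another holder references this extent.  A hedged summary of the returns as
 * implemented below:
 *
 *	0        no conflicting delayed ref was found
 *	1        a shared ref, or a data ref from another root/inode/offset,
 *	         exists
 *	-EAGAIN  the head mutex was contended, the caller should retry
 */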
2991 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2992                                       struct btrfs_root *root,
2993                                       struct btrfs_path *path,
2994                                       u64 objectid, u64 offset, u64 bytenr)
2995 {
2996         struct btrfs_delayed_ref_head *head;
2997         struct btrfs_delayed_ref_node *ref;
2998         struct btrfs_delayed_data_ref *data_ref;
2999         struct btrfs_delayed_ref_root *delayed_refs;
3000         int ret = 0;
3001
3002         delayed_refs = &trans->transaction->delayed_refs;
3003         spin_lock(&delayed_refs->lock);
3004         head = btrfs_find_delayed_ref_head(trans, bytenr);
3005         if (!head) {
3006                 spin_unlock(&delayed_refs->lock);
3007                 return 0;
3008         }
3009
3010         if (!mutex_trylock(&head->mutex)) {
3011                 atomic_inc(&head->node.refs);
3012                 spin_unlock(&delayed_refs->lock);
3013
3014                 btrfs_release_path(path);
3015
3016                 /*
3017                  * Mutex was contended, block until it's released and let
3018                  * caller try again
3019                  */
3020                 mutex_lock(&head->mutex);
3021                 mutex_unlock(&head->mutex);
3022                 btrfs_put_delayed_ref(&head->node);
3023                 return -EAGAIN;
3024         }
3025         spin_unlock(&delayed_refs->lock);
3026
3027         spin_lock(&head->lock);
3028         list_for_each_entry(ref, &head->ref_list, list) {
3029                 /* If it's a shared ref we know a cross reference exists */
3030                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3031                         ret = 1;
3032                         break;
3033                 }
3034
3035                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3036
3037                 /*
3038                  * If our ref doesn't match the one we're currently looking at
3039                  * then we have a cross reference.
3040                  */
3041                 if (data_ref->root != root->root_key.objectid ||
3042                     data_ref->objectid != objectid ||
3043                     data_ref->offset != offset) {
3044                         ret = 1;
3045                         break;
3046                 }
3047         }
3048         spin_unlock(&head->lock);
3049         mutex_unlock(&head->mutex);
3050         return ret;
3051 }
3052
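/*
 * Check the committed extent tree for references to @bytenr other than the
 * single inline data ref owned by @root/@objectid/@offset.  Roughly, based on
 * the checks below: returns 0 when the extent carries exactly one matching
 * inline EXTENT_DATA_REF and was created after the last snapshot, -ENOENT
 * when no extent item is found, and 1 when another reference may exist.
 */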
3053 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3054                                         struct btrfs_root *root,
3055                                         struct btrfs_path *path,
3056                                         u64 objectid, u64 offset, u64 bytenr)
3057 {
3058         struct btrfs_root *extent_root = root->fs_info->extent_root;
3059         struct extent_buffer *leaf;
3060         struct btrfs_extent_data_ref *ref;
3061         struct btrfs_extent_inline_ref *iref;
3062         struct btrfs_extent_item *ei;
3063         struct btrfs_key key;
3064         u32 item_size;
3065         int ret;
3066
3067         key.objectid = bytenr;
3068         key.offset = (u64)-1;
3069         key.type = BTRFS_EXTENT_ITEM_KEY;
3070
3071         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3072         if (ret < 0)
3073                 goto out;
3074         BUG_ON(ret == 0); /* Corruption */
3075
3076         ret = -ENOENT;
3077         if (path->slots[0] == 0)
3078                 goto out;
3079
3080         path->slots[0]--;
3081         leaf = path->nodes[0];
3082         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3083
3084         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3085                 goto out;
3086
3087         ret = 1;
3088         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3089 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3090         if (item_size < sizeof(*ei)) {
3091                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3092                 goto out;
3093         }
3094 #endif
3095         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3096
3097         if (item_size != sizeof(*ei) +
3098             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3099                 goto out;
3100
3101         if (btrfs_extent_generation(leaf, ei) <=
3102             btrfs_root_last_snapshot(&root->root_item))
3103                 goto out;
3104
3105         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3106         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3107             BTRFS_EXTENT_DATA_REF_KEY)
3108                 goto out;
3109
3110         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3111         if (btrfs_extent_refs(leaf, ei) !=
3112             btrfs_extent_data_ref_count(leaf, ref) ||
3113             btrfs_extent_data_ref_root(leaf, ref) !=
3114             root->root_key.objectid ||
3115             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3116             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3117                 goto out;
3118
3119         ret = 0;
3120 out:
3121         return ret;
3122 }
3123
3124 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3125                           struct btrfs_root *root,
3126                           u64 objectid, u64 offset, u64 bytenr)
3127 {
3128         struct btrfs_path *path;
3129         int ret;
3130         int ret2;
3131
3132         path = btrfs_alloc_path();
3133         if (!path)
3134                 return -ENOMEM;
3135
3136         do {
3137                 ret = check_committed_ref(trans, root, path, objectid,
3138                                           offset, bytenr);
3139                 if (ret && ret != -ENOENT)
3140                         goto out;
3141
3142                 ret2 = check_delayed_ref(trans, root, path, objectid,
3143                                          offset, bytenr);
3144         } while (ret2 == -EAGAIN);
3145
3146         if (ret2 && ret2 != -ENOENT) {
3147                 ret = ret2;
3148                 goto out;
3149         }
3150
3151         if (ret != -ENOENT || ret2 != -ENOENT)
3152                 ret = 0;
3153 out:
3154         btrfs_free_path(path);
3155         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3156                 WARN_ON(ret > 0);
3157         return ret;
3158 }
3159
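/*
 * Walk every pointer in @buf and add (inc != 0) or drop (inc == 0) one
 * reference for it: file extent disk bytenrs when @buf is a leaf, child
 * block pointers when it is a node.  This is the helper behind
 * btrfs_inc_ref()/btrfs_dec_ref() below, typically used when a buffer is
 * COWed or freed and its references have to follow.
 */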
3160 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3161                            struct btrfs_root *root,
3162                            struct extent_buffer *buf,
3163                            int full_backref, int inc)
3164 {
3165         u64 bytenr;
3166         u64 num_bytes;
3167         u64 parent;
3168         u64 ref_root;
3169         u32 nritems;
3170         struct btrfs_key key;
3171         struct btrfs_file_extent_item *fi;
3172         int i;
3173         int level;
3174         int ret = 0;
3175         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3176                             u64, u64, u64, u64, u64, u64, int);
3177
3178
3179         if (btrfs_test_is_dummy_root(root))
3180                 return 0;
3181
3182         ref_root = btrfs_header_owner(buf);
3183         nritems = btrfs_header_nritems(buf);
3184         level = btrfs_header_level(buf);
3185
3186         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3187                 return 0;
3188
3189         if (inc)
3190                 process_func = btrfs_inc_extent_ref;
3191         else
3192                 process_func = btrfs_free_extent;
3193
3194         if (full_backref)
3195                 parent = buf->start;
3196         else
3197                 parent = 0;
3198
3199         for (i = 0; i < nritems; i++) {
3200                 if (level == 0) {
3201                         btrfs_item_key_to_cpu(buf, &key, i);
3202                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3203                                 continue;
3204                         fi = btrfs_item_ptr(buf, i,
3205                                             struct btrfs_file_extent_item);
3206                         if (btrfs_file_extent_type(buf, fi) ==
3207                             BTRFS_FILE_EXTENT_INLINE)
3208                                 continue;
3209                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3210                         if (bytenr == 0)
3211                                 continue;
3212
3213                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3214                         key.offset -= btrfs_file_extent_offset(buf, fi);
3215                         ret = process_func(trans, root, bytenr, num_bytes,
3216                                            parent, ref_root, key.objectid,
3217                                            key.offset, 1);
3218                         if (ret)
3219                                 goto fail;
3220                 } else {
3221                         bytenr = btrfs_node_blockptr(buf, i);
3222                         num_bytes = root->nodesize;
3223                         ret = process_func(trans, root, bytenr, num_bytes,
3224                                            parent, ref_root, level - 1, 0,
3225                                            1);
3226                         if (ret)
3227                                 goto fail;
3228                 }
3229         }
3230         return 0;
3231 fail:
3232         return ret;
3233 }
3234
3235 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3236                   struct extent_buffer *buf, int full_backref)
3237 {
3238         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3239 }
3240
3241 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3242                   struct extent_buffer *buf, int full_backref)
3243 {
3244         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3245 }
3246
3247 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3248                                  struct btrfs_root *root,
3249                                  struct btrfs_path *path,
3250                                  struct btrfs_block_group_cache *cache)
3251 {
3252         int ret;
3253         struct btrfs_root *extent_root = root->fs_info->extent_root;
3254         unsigned long bi;
3255         struct extent_buffer *leaf;
3256
3257         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3258         if (ret) {
3259                 if (ret > 0)
3260                         ret = -ENOENT;
3261                 goto fail;
3262         }
3263
3264         leaf = path->nodes[0];
3265         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3266         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3267         btrfs_mark_buffer_dirty(leaf);
3268 fail:
3269         btrfs_release_path(path);
3270         return ret;
3271
3272 }
3273
3274 static struct btrfs_block_group_cache *
3275 next_block_group(struct btrfs_root *root,
3276                  struct btrfs_block_group_cache *cache)
3277 {
3278         struct rb_node *node;
3279
3280         spin_lock(&root->fs_info->block_group_cache_lock);
3281
3282         /* If our block group was removed, we need a full search. */
3283         if (RB_EMPTY_NODE(&cache->cache_node)) {
3284                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3285
3286                 spin_unlock(&root->fs_info->block_group_cache_lock);
3287                 btrfs_put_block_group(cache);
3288                 cache = btrfs_lookup_first_block_group(root->fs_info,
3289                                                        next_bytenr);
3290                 return cache;
3291         }
3292         node = rb_next(&cache->cache_node);
3293         btrfs_put_block_group(cache);
3294         if (node) {
3295                 cache = rb_entry(node, struct btrfs_block_group_cache,
3296                                  cache_node);
3297                 btrfs_get_block_group(cache);
3298         } else
3299                 cache = NULL;
3300         spin_unlock(&root->fs_info->block_group_cache_lock);
3301         return cache;
3302 }
3303
3304 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3305                             struct btrfs_trans_handle *trans,
3306                             struct btrfs_path *path)
3307 {
3308         struct btrfs_root *root = block_group->fs_info->tree_root;
3309         struct inode *inode = NULL;
3310         u64 alloc_hint = 0;
3311         int dcs = BTRFS_DC_ERROR;
3312         u64 num_pages = 0;
3313         int retries = 0;
3314         int ret = 0;
3315
3316         /*
3317          * If this block group is smaller than 100 megs, don't bother caching the
3318          * block group.
3319          */
3320         if (block_group->key.offset < (100 * 1024 * 1024)) {
3321                 spin_lock(&block_group->lock);
3322                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3323                 spin_unlock(&block_group->lock);
3324                 return 0;
3325         }
3326
3327         if (trans->aborted)
3328                 return 0;
3329 again:
3330         inode = lookup_free_space_inode(root, block_group, path);
3331         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3332                 ret = PTR_ERR(inode);
3333                 btrfs_release_path(path);
3334                 goto out;
3335         }
3336
3337         if (IS_ERR(inode)) {
3338                 BUG_ON(retries);
3339                 retries++;
3340
3341                 if (block_group->ro)
3342                         goto out_free;
3343
3344                 ret = create_free_space_inode(root, trans, block_group, path);
3345                 if (ret)
3346                         goto out_free;
3347                 goto again;
3348         }
3349
3350         /* We've already set up this transaction, go ahead and exit */
3351         if (block_group->cache_generation == trans->transid &&
3352             i_size_read(inode)) {
3353                 dcs = BTRFS_DC_SETUP;
3354                 goto out_put;
3355         }
3356
3357         /*
3358          * We want to set the generation to 0; that way, if anything goes wrong
3359          * from here on out we know not to trust this cache when we load up next
3360          * time.
3361          */
3362         BTRFS_I(inode)->generation = 0;
3363         ret = btrfs_update_inode(trans, root, inode);
3364         if (ret) {
3365                 /*
3366                  * So theoretically we could recover from this by simply setting the
3367                  * super cache generation to 0 so we know to invalidate the
3368                  * cache, but then we'd have to keep track of the block groups
3369                  * that fail this way so we know we _have_ to reset this cache
3370                  * before the next commit or risk reading stale cache.  So to
3371                  * limit our exposure to horrible edge cases, let's just abort the
3372                  * transaction; this only happens in really bad situations
3373                  * anyway.
3374                  */
3375                 btrfs_abort_transaction(trans, root, ret);
3376                 goto out_put;
3377         }
3378         WARN_ON(ret);
3379
3380         if (i_size_read(inode) > 0) {
3381                 ret = btrfs_check_trunc_cache_free_space(root,
3382                                         &root->fs_info->global_block_rsv);
3383                 if (ret)
3384                         goto out_put;
3385
3386                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3387                 if (ret)
3388                         goto out_put;
3389         }
3390
3391         spin_lock(&block_group->lock);
3392         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3393             !btrfs_test_opt(root, SPACE_CACHE)) {
3394                 /*
3395                  * don't bother trying to write stuff out _if_
3396                  * a) we're not cached,
3397                  * b) we're mounted with the nospace_cache option.
3398                  */
3399                 dcs = BTRFS_DC_WRITTEN;
3400                 spin_unlock(&block_group->lock);
3401                 goto out_put;
3402         }
3403         spin_unlock(&block_group->lock);
3404
3405         /*
3406          * Try to preallocate enough space based on how big the block group is.
3407          * Keep in mind this has to include any pinned space which could end up
3408          * taking up quite a bit since it's not folded into the other space
3409          * cache.
3410          */
3411         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3412         if (!num_pages)
3413                 num_pages = 1;
3414
3415         num_pages *= 16;
3416         num_pages *= PAGE_CACHE_SIZE;
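        /*
         * Worked example of the sizing above (assuming 4K pages): a 1GiB
         * block group gives div_u64(1GiB, 256MiB) = 4, times 16 is 64 pages,
         * times 4096 is 256KiB preallocated for the free space cache file.
         */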
3417
3418         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3419         if (ret)
3420                 goto out_put;
3421
3422         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3423                                               num_pages, num_pages,
3424                                               &alloc_hint);
3425         if (!ret)
3426                 dcs = BTRFS_DC_SETUP;
3427         btrfs_free_reserved_data_space(inode, num_pages);
3428
3429 out_put:
3430         iput(inode);
3431 out_free:
3432         btrfs_release_path(path);
3433 out:
3434         spin_lock(&block_group->lock);
3435         if (!ret && dcs == BTRFS_DC_SETUP)
3436                 block_group->cache_generation = trans->transid;
3437         block_group->disk_cache_state = dcs;
3438         spin_unlock(&block_group->lock);
3439
3440         return ret;
3441 }
3442
3443 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3444                             struct btrfs_root *root)
3445 {
3446         struct btrfs_block_group_cache *cache, *tmp;
3447         struct btrfs_transaction *cur_trans = trans->transaction;
3448         struct btrfs_path *path;
3449
3450         if (list_empty(&cur_trans->dirty_bgs) ||
3451             !btrfs_test_opt(root, SPACE_CACHE))
3452                 return 0;
3453
3454         path = btrfs_alloc_path();
3455         if (!path)
3456                 return -ENOMEM;
3457
3458         /* Could add new block groups, use _safe just in case */
3459         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3460                                  dirty_list) {
3461                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3462                         cache_save_setup(cache, trans, path);
3463         }
3464
3465         btrfs_free_path(path);
3466         return 0;
3467 }
3468
3469 /*
3470  * transaction commit does final block group cache writeback during a
3471  * critical section where nothing is allowed to change the FS.  This is
3472  * required in order for the cache to actually match the block group,
3473  * but can introduce a lot of latency into the commit.
3474  *
3475  * So, btrfs_start_dirty_block_groups is here to kick off block group
3476  * cache IO.  There's a chance we'll have to redo some of it if the
3477  * block group changes again during the commit, but it greatly reduces
3478  * the commit latency by getting rid of the easy block groups while
3479  * we're still allowing others to join the commit.
3480  */
3481 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3482                                    struct btrfs_root *root)
3483 {
3484         struct btrfs_block_group_cache *cache;
3485         struct btrfs_transaction *cur_trans = trans->transaction;
3486         int ret = 0;
3487         int should_put;
3488         struct btrfs_path *path = NULL;
3489         LIST_HEAD(dirty);
3490         struct list_head *io = &cur_trans->io_bgs;
3491         int num_started = 0;
3492         int loops = 0;
3493
3494         spin_lock(&cur_trans->dirty_bgs_lock);
3495         if (list_empty(&cur_trans->dirty_bgs)) {
3496                 spin_unlock(&cur_trans->dirty_bgs_lock);
3497                 return 0;
3498         }
3499         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3500         spin_unlock(&cur_trans->dirty_bgs_lock);
3501
3502 again:
3503         /*
3504          * make sure all the block groups on our dirty list actually
3505          * exist
3506          */
3507         btrfs_create_pending_block_groups(trans, root);
3508
3509         if (!path) {
3510                 path = btrfs_alloc_path();
3511                 if (!path)
3512                         return -ENOMEM;
3513         }
3514
3515         /*
3516          * cache_write_mutex is here only to save us from balance or automatic
3517          * removal of empty block groups deleting this block group while we are
3518          * writing out the cache
3519          */
3520         mutex_lock(&trans->transaction->cache_write_mutex);
3521         while (!list_empty(&dirty)) {
3522                 cache = list_first_entry(&dirty,
3523                                          struct btrfs_block_group_cache,
3524                                          dirty_list);
3525                 /*
3526                  * this can happen if something re-dirties a block
3527                  * group that is already under IO.  Just wait for it to
3528                  * finish and then do it all again
3529                  */
3530                 if (!list_empty(&cache->io_list)) {
3531                         list_del_init(&cache->io_list);
3532                         btrfs_wait_cache_io(root, trans, cache,
3533                                             &cache->io_ctl, path,
3534                                             cache->key.objectid);
3535                         btrfs_put_block_group(cache);
3536                 }
3537
3538
3539                 /*
3540                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3541                  * if it should update the cache_state.  Don't delete
3542                  * until after we wait.
3543                  *
3544                  * Since we're not running in the commit critical section
3545                  * we need the dirty_bgs_lock to protect from update_block_group
3546                  */
3547                 spin_lock(&cur_trans->dirty_bgs_lock);
3548                 list_del_init(&cache->dirty_list);
3549                 spin_unlock(&cur_trans->dirty_bgs_lock);
3550
3551                 should_put = 1;
3552
3553                 cache_save_setup(cache, trans, path);
3554
3555                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3556                         cache->io_ctl.inode = NULL;
3557                         ret = btrfs_write_out_cache(root, trans, cache, path);
3558                         if (ret == 0 && cache->io_ctl.inode) {
3559                                 num_started++;
3560                                 should_put = 0;
3561
3562                                 /*
3563                                  * the cache_write_mutex is protecting
3564                                  * the io_list
3565                                  */
3566                                 list_add_tail(&cache->io_list, io);
3567                         } else {
3568                                 /*
3569                                  * if we failed to write the cache, the
3570                                  * generation will be bad and life goes on
3571                                  */
3572                                 ret = 0;
3573                         }
3574                 }
3575                 if (!ret) {
3576                         ret = write_one_cache_group(trans, root, path, cache);
3577                         /*
3578                          * Our block group might still be attached to the list
3579                          * of new block groups in the transaction handle of some
3580                          * other task (struct btrfs_trans_handle->new_bgs). This
3581                          * means its block group item isn't yet in the extent
3582                          * tree. If this happens ignore the error, as we will
3583                          * try again later in the critical section of the
3584                          * transaction commit.
3585                          */
3586                         if (ret == -ENOENT) {
3587                                 ret = 0;
3588                                 spin_lock(&cur_trans->dirty_bgs_lock);
3589                                 if (list_empty(&cache->dirty_list)) {
3590                                         list_add_tail(&cache->dirty_list,
3591                                                       &cur_trans->dirty_bgs);
3592                                         btrfs_get_block_group(cache);
3593                                 }
3594                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3595                         } else if (ret) {
3596                                 btrfs_abort_transaction(trans, root, ret);
3597                         }
3598                 }
3599
3600                 /* if it's not on the io list, we need to put the block group */
3601                 if (should_put)
3602                         btrfs_put_block_group(cache);
3603
3604                 if (ret)
3605                         break;
3606
3607                 /*
3608                  * Avoid blocking other tasks for too long. It might even save
3609                  * us from writing caches for block groups that are going to be
3610                  * removed.
3611                  */
3612                 mutex_unlock(&trans->transaction->cache_write_mutex);
3613                 mutex_lock(&trans->transaction->cache_write_mutex);
3614         }
3615         mutex_unlock(&trans->transaction->cache_write_mutex);
3616
3617         /*
3618          * go through delayed refs for all the stuff we've just kicked off
3619          * and then loop back (just once)
3620          */
3621         ret = btrfs_run_delayed_refs(trans, root, 0);
3622         if (!ret && loops == 0) {
3623                 loops++;
3624                 spin_lock(&cur_trans->dirty_bgs_lock);
3625                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3626                 /*
3627                  * dirty_bgs_lock protects us from concurrent block group
3628                  * deletes too (not just cache_write_mutex).
3629                  */
3630                 if (!list_empty(&dirty)) {
3631                         spin_unlock(&cur_trans->dirty_bgs_lock);
3632                         goto again;
3633                 }
3634                 spin_unlock(&cur_trans->dirty_bgs_lock);
3635         }
3636
3637         btrfs_free_path(path);
3638         return ret;
3639 }
3640
3641 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3642                                    struct btrfs_root *root)
3643 {
3644         struct btrfs_block_group_cache *cache;
3645         struct btrfs_transaction *cur_trans = trans->transaction;
3646         int ret = 0;
3647         int should_put;
3648         struct btrfs_path *path;
3649         struct list_head *io = &cur_trans->io_bgs;
3650         int num_started = 0;
3651
3652         path = btrfs_alloc_path();
3653         if (!path)
3654                 return -ENOMEM;
3655
3656         /*
3657          * We don't need the lock here since we are protected by the transaction
3658          * commit.  We want to do the cache_save_setup first and then run the
3659          * delayed refs to make sure we have the best chance at doing this all
3660          * in one shot.
3661          */
3662         while (!list_empty(&cur_trans->dirty_bgs)) {
3663                 cache = list_first_entry(&cur_trans->dirty_bgs,
3664                                          struct btrfs_block_group_cache,
3665                                          dirty_list);
3666
3667                 /*
3668                  * this can happen if cache_save_setup re-dirties a block
3669                  * group that is already under IO.  Just wait for it to
3670                  * finish and then do it all again
3671                  */
3672                 if (!list_empty(&cache->io_list)) {
3673                         list_del_init(&cache->io_list);
3674                         btrfs_wait_cache_io(root, trans, cache,
3675                                             &cache->io_ctl, path,
3676                                             cache->key.objectid);
3677                         btrfs_put_block_group(cache);
3678                 }
3679
3680                 /*
3681                  * don't remove from the dirty list until after we've waited
3682                  * on any pending IO
3683                  */
3684                 list_del_init(&cache->dirty_list);
3685                 should_put = 1;
3686
3687                 cache_save_setup(cache, trans, path);
3688
3689                 if (!ret)
3690                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3691
3692                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3693                         cache->io_ctl.inode = NULL;
3694                         ret = btrfs_write_out_cache(root, trans, cache, path);
3695                         if (ret == 0 && cache->io_ctl.inode) {
3696                                 num_started++;
3697                                 should_put = 0;
3698                                 list_add_tail(&cache->io_list, io);
3699                         } else {
3700                                 /*
3701                                  * if we failed to write the cache, the
3702                                  * generation will be bad and life goes on
3703                                  */
3704                                 ret = 0;
3705                         }
3706                 }
3707                 if (!ret) {
3708                         ret = write_one_cache_group(trans, root, path, cache);
3709                         if (ret)
3710                                 btrfs_abort_transaction(trans, root, ret);
3711                 }
3712
3713                 /* if it's not on the io list, we need to put the block group */
3714                 if (should_put)
3715                         btrfs_put_block_group(cache);
3716         }
3717
3718         while (!list_empty(io)) {
3719                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3720                                          io_list);
3721                 list_del_init(&cache->io_list);
3722                 btrfs_wait_cache_io(root, trans, cache,
3723                                     &cache->io_ctl, path, cache->key.objectid);
3724                 btrfs_put_block_group(cache);
3725         }
3726
3727         btrfs_free_path(path);
3728         return ret;
3729 }
3730
3731 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3732 {
3733         struct btrfs_block_group_cache *block_group;
3734         int readonly = 0;
3735
3736         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3737         if (!block_group || block_group->ro)
3738                 readonly = 1;
3739         if (block_group)
3740                 btrfs_put_block_group(block_group);
3741         return readonly;
3742 }
3743
3744 static const char *alloc_name(u64 flags)
3745 {
3746         switch (flags) {
3747         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3748                 return "mixed";
3749         case BTRFS_BLOCK_GROUP_METADATA:
3750                 return "metadata";
3751         case BTRFS_BLOCK_GROUP_DATA:
3752                 return "data";
3753         case BTRFS_BLOCK_GROUP_SYSTEM:
3754                 return "system";
3755         default:
3756                 WARN_ON(1);
3757                 return "invalid-combination";
3758         }
3759 }
3760
3761 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3762                              u64 total_bytes, u64 bytes_used,
3763                              struct btrfs_space_info **space_info)
3764 {
3765         struct btrfs_space_info *found;
3766         int i;
3767         int factor;
3768         int ret;
3769
3770         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3771                      BTRFS_BLOCK_GROUP_RAID10))
3772                 factor = 2;
3773         else
3774                 factor = 1;
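        /*
         * factor captures how much raw disk backs each logical byte for the
         * duplicated profiles, e.g. with RAID1 a 1GiB allocation consumes
         * 2GiB of disk, which is why disk_total and disk_used below are
         * scaled by it.
         */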
3775
3776         found = __find_space_info(info, flags);
3777         if (found) {
3778                 spin_lock(&found->lock);
3779                 found->total_bytes += total_bytes;
3780                 found->disk_total += total_bytes * factor;
3781                 found->bytes_used += bytes_used;
3782                 found->disk_used += bytes_used * factor;
3783                 if (total_bytes > 0)
3784                         found->full = 0;
3785                 spin_unlock(&found->lock);
3786                 *space_info = found;
3787                 return 0;
3788         }
3789         found = kzalloc(sizeof(*found), GFP_NOFS);
3790         if (!found)
3791                 return -ENOMEM;
3792
3793         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3794         if (ret) {
3795                 kfree(found);
3796                 return ret;
3797         }
3798
3799         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3800                 INIT_LIST_HEAD(&found->block_groups[i]);
3801         init_rwsem(&found->groups_sem);
3802         spin_lock_init(&found->lock);
3803         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3804         found->total_bytes = total_bytes;
3805         found->disk_total = total_bytes * factor;
3806         found->bytes_used = bytes_used;
3807         found->disk_used = bytes_used * factor;
3808         found->bytes_pinned = 0;
3809         found->bytes_reserved = 0;
3810         found->bytes_readonly = 0;
3811         found->bytes_may_use = 0;
3812         found->full = 0;
3813         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3814         found->chunk_alloc = 0;
3815         found->flush = 0;
3816         init_waitqueue_head(&found->wait);
3817         INIT_LIST_HEAD(&found->ro_bgs);
3818
3819         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3820                                     info->space_info_kobj, "%s",
3821                                     alloc_name(found->flags));
3822         if (ret) {
3823                 kfree(found);
3824                 return ret;
3825         }
3826
3827         *space_info = found;
3828         list_add_rcu(&found->list, &info->space_info);
3829         if (flags & BTRFS_BLOCK_GROUP_DATA)
3830                 info->data_sinfo = found;
3831
3832         return ret;
3833 }
3834
3835 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3836 {
3837         u64 extra_flags = chunk_to_extended(flags) &
3838                                 BTRFS_EXTENDED_PROFILE_MASK;
3839
3840         write_seqlock(&fs_info->profiles_lock);
3841         if (flags & BTRFS_BLOCK_GROUP_DATA)
3842                 fs_info->avail_data_alloc_bits |= extra_flags;
3843         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3844                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3845         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3846                 fs_info->avail_system_alloc_bits |= extra_flags;
3847         write_sequnlock(&fs_info->profiles_lock);
3848 }
3849
3850 /*
3851  * returns target flags in extended format or 0 if restripe for this
3852  * chunk_type is not in progress
3853  *
3854  * should be called with either volume_mutex or balance_lock held
3855  */
3856 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3857 {
3858         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3859         u64 target = 0;
3860
3861         if (!bctl)
3862                 return 0;
3863
3864         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3865             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3866                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3867         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3868                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3869                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3870         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3871                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3872                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3873         }
3874
3875         return target;
3876 }
3877
3878 /*
3879  * @flags: available profiles in extended format (see ctree.h)
3880  *
3881  * Returns reduced profile in chunk format.  If profile changing is in
3882  * progress (either running or paused) picks the target profile (if it's
3883  * already available), otherwise falls back to plain reducing.
3884  */
3885 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3886 {
3887         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3888         u64 target;
3889         u64 raid_type;
3890         u64 allowed = 0;
3891
3892         /*
3893          * see if restripe for this chunk_type is in progress, if so
3894          * try to reduce to the target profile
3895          */
3896         spin_lock(&root->fs_info->balance_lock);
3897         target = get_restripe_target(root->fs_info, flags);
3898         if (target) {
3899                 /* pick target profile only if it's already available */
3900                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3901                         spin_unlock(&root->fs_info->balance_lock);
3902                         return extended_to_chunk(target);
3903                 }
3904         }
3905         spin_unlock(&root->fs_info->balance_lock);
3906
3907         /* First, mask out the RAID levels which aren't possible */
3908         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3909                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3910                         allowed |= btrfs_raid_group[raid_type];
3911         }
3912         allowed &= flags;
3913
3914         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3915                 allowed = BTRFS_BLOCK_GROUP_RAID6;
3916         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3917                 allowed = BTRFS_BLOCK_GROUP_RAID5;
3918         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3919                 allowed = BTRFS_BLOCK_GROUP_RAID10;
3920         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3921                 allowed = BTRFS_BLOCK_GROUP_RAID1;
3922         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3923                 allowed = BTRFS_BLOCK_GROUP_RAID0;
3924
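        /*
         * Example of the reduction above: if the available profiles include
         * both RAID1 and RAID0 (possible while a conversion is in progress),
         * only RAID1 survives, since the chain prefers the more redundant
         * profile.
         */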
3925         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3926
3927         return extended_to_chunk(flags | allowed);
3928 }
3929
3930 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3931 {
3932         unsigned seq;
3933         u64 flags;
3934
3935         do {
3936                 flags = orig_flags;
3937                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3938
3939                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3940                         flags |= root->fs_info->avail_data_alloc_bits;
3941                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3942                         flags |= root->fs_info->avail_system_alloc_bits;
3943                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3944                         flags |= root->fs_info->avail_metadata_alloc_bits;
3945         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3946
3947         return btrfs_reduce_alloc_profile(root, flags);
3948 }
3949
3950 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3951 {
3952         u64 flags;
3953         u64 ret;
3954
3955         if (data)
3956                 flags = BTRFS_BLOCK_GROUP_DATA;
3957         else if (root == root->fs_info->chunk_root)
3958                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3959         else
3960                 flags = BTRFS_BLOCK_GROUP_METADATA;
3961
3962         ret = get_alloc_profile(root, flags);
3963         return ret;
3964 }
3965
3966 /*
3967  * This will check the space that the inode allocates from to make sure we have
3968  * enough space for bytes.
3969  */
3970 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3971 {
3972         struct btrfs_space_info *data_sinfo;
3973         struct btrfs_root *root = BTRFS_I(inode)->root;
3974         struct btrfs_fs_info *fs_info = root->fs_info;
3975         u64 used;
3976         int ret = 0;
3977         int need_commit = 2;
3978         int have_pinned_space;
3979
3980         /* make sure bytes are sectorsize aligned */
3981         bytes = ALIGN(bytes, root->sectorsize);
3982
3983         if (btrfs_is_free_space_inode(inode)) {
3984                 need_commit = 0;
3985                 ASSERT(current->journal_info);
3986         }
3987
3988         data_sinfo = fs_info->data_sinfo;
3989         if (!data_sinfo)
3990                 goto alloc;
3991
3992 again:
3993         /* make sure we have enough space to handle the data first */
3994         spin_lock(&data_sinfo->lock);
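        /*
         * "used" below is everything already spoken for in this space_info:
         * allocated extents, outstanding reservations, bytes pinned until the
         * transaction commits, read-only space and earlier bytes_may_use
         * reservations.
         */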
3995         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3996                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3997                 data_sinfo->bytes_may_use;
3998
3999         if (used + bytes > data_sinfo->total_bytes) {
4000                 struct btrfs_trans_handle *trans;
4001
4002                 /*
4003                  * if we don't have enough free bytes in this space then we need
4004                  * to alloc a new chunk.
4005                  */
4006                 if (!data_sinfo->full) {
4007                         u64 alloc_target;
4008
4009                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4010                         spin_unlock(&data_sinfo->lock);
4011 alloc:
4012                         alloc_target = btrfs_get_alloc_profile(root, 1);
4013                         /*
4014                          * It is ugly that we don't use the nolock join
4015                          * transaction for the free space inode case here,
4016                          * but it is safe because we only do the data space
4017                          * reservation for the free space cache in the
4018                          * transaction context; the common join transaction
4019                          * just increases the counter of the current transaction
4020                          * handle and doesn't try to acquire the trans_lock of
4021                          * the fs.
4022                          */
4023                         trans = btrfs_join_transaction(root);
4024                         if (IS_ERR(trans))
4025                                 return PTR_ERR(trans);
4026
4027                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4028                                              alloc_target,
4029                                              CHUNK_ALLOC_NO_FORCE);
4030                         btrfs_end_transaction(trans, root);
4031                         if (ret < 0) {
4032                                 if (ret != -ENOSPC)
4033                                         return ret;
4034                                 else {
4035                                         have_pinned_space = 1;
4036                                         goto commit_trans;
4037                                 }
4038                         }
4039
4040                         if (!data_sinfo)
4041                                 data_sinfo = fs_info->data_sinfo;
4042
4043                         goto again;
4044                 }
4045
4046                 /*
4047                  * If we don't have enough pinned space to deal with this
4048                  * allocation, and no chunk was removed in the current
4049                  * transaction, don't bother committing the transaction.
4050                  */
4051                 have_pinned_space = percpu_counter_compare(
4052                         &data_sinfo->total_bytes_pinned,
4053                         used + bytes - data_sinfo->total_bytes);
4054                 spin_unlock(&data_sinfo->lock);
4055
4056                 /* commit the current transaction and try again */
4057 commit_trans:
4058                 if (need_commit &&
4059                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4060                         need_commit--;
4061
4062                         if (need_commit > 0)
4063                                 btrfs_wait_ordered_roots(fs_info, -1);
4064
4065                         trans = btrfs_join_transaction(root);
4066                         if (IS_ERR(trans))
4067                                 return PTR_ERR(trans);
4068                         if (have_pinned_space >= 0 ||
4069                             trans->transaction->have_free_bgs ||
4070                             need_commit > 0) {
4071                                 ret = btrfs_commit_transaction(trans, root);
4072                                 if (ret)
4073                                         return ret;
4074                                 /*
4075                                  * make sure that all running delayed iputs
4076                                  * are done
4077                                  */
4078                                 down_write(&root->fs_info->delayed_iput_sem);
4079                                 up_write(&root->fs_info->delayed_iput_sem);
4080                                 goto again;
4081                         } else {
4082                                 btrfs_end_transaction(trans, root);
4083                         }
4084                 }
4085
4086                 trace_btrfs_space_reservation(root->fs_info,
4087                                               "space_info:enospc",
4088                                               data_sinfo->flags, bytes, 1);
4089                 return -ENOSPC;
4090         }
4091         ret = btrfs_qgroup_reserve(root, write_bytes);
4092         if (ret)
4093                 goto out;
4094         data_sinfo->bytes_may_use += bytes;
4095         trace_btrfs_space_reservation(root->fs_info, "space_info",
4096                                       data_sinfo->flags, bytes, 1);
4097 out:
4098         spin_unlock(&data_sinfo->lock);
4099
4100         return ret;
4101 }
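/*
 * Editor's illustration: a minimal user-space sketch (not kernel code) of the
 * accounting test btrfs_check_data_free_space() performs above.  The struct
 * mirrors only the btrfs_space_info counters involved in the check; all names
 * here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_space_info {
        uint64_t total_bytes;
        uint64_t bytes_used;
        uint64_t bytes_reserved;
        uint64_t bytes_pinned;
        uint64_t bytes_readonly;
        uint64_t bytes_may_use;
};

/* True if @bytes more of data space fits without allocating a new chunk. */
static bool example_data_space_fits(const struct example_space_info *si,
                                    uint64_t bytes)
{
        uint64_t used = si->bytes_used + si->bytes_reserved +
                        si->bytes_pinned + si->bytes_readonly +
                        si->bytes_may_use;

        return used + bytes <= si->total_bytes;
}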
4102
4103 /*
4104  * Called if we need to clear a data reservation for this inode.
4105  */
4106 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4107 {
4108         struct btrfs_root *root = BTRFS_I(inode)->root;
4109         struct btrfs_space_info *data_sinfo;
4110
4111         /* make sure bytes are sectorsize aligned */
4112         bytes = ALIGN(bytes, root->sectorsize);
4113
4114         data_sinfo = root->fs_info->data_sinfo;
4115         spin_lock(&data_sinfo->lock);
4116         WARN_ON(data_sinfo->bytes_may_use < bytes);
4117         data_sinfo->bytes_may_use -= bytes;
4118         trace_btrfs_space_reservation(root->fs_info, "space_info",
4119                                       data_sinfo->flags, bytes, 0);
4120         spin_unlock(&data_sinfo->lock);
4121 }
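/*
 * Editor's illustration: both btrfs_check_data_free_space() and
 * btrfs_free_reserved_data_space() round the byte count up to the sector size
 * before touching bytes_may_use, so a reservation and its later release adjust
 * the counter by exactly the same amount.  A user-space sketch of that
 * rounding, assuming a power-of-two sector size as ALIGN() does:
 */
#include <stdint.h>

static uint64_t example_align_up(uint64_t bytes, uint64_t sectorsize)
{
        /* example_align_up(3000, 4096) == 4096 */
        return (bytes + sectorsize - 1) & ~(sectorsize - 1);
}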
4122
4123 static void force_metadata_allocation(struct btrfs_fs_info *info)
4124 {
4125         struct list_head *head = &info->space_info;
4126         struct btrfs_space_info *found;
4127
4128         rcu_read_lock();
4129         list_for_each_entry_rcu(found, head, list) {
4130                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4131                         found->force_alloc = CHUNK_ALLOC_FORCE;
4132         }
4133         rcu_read_unlock();
4134 }
4135
4136 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4137 {
4138         return (global->size << 1);
4139 }
4140
4141 static int should_alloc_chunk(struct btrfs_root *root,
4142                               struct btrfs_space_info *sinfo, int force)
4143 {
4144         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4145         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4146         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4147         u64 thresh;
4148
4149         if (force == CHUNK_ALLOC_FORCE)
4150                 return 1;
4151
4152         /*
4153          * We need to take into account the global rsv because for all intents
4154          * and purposes it's used space.  Don't worry about locking the
4155          * global_rsv, it doesn't change except when the transaction commits.
4156          */
4157         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4158                 num_allocated += calc_global_rsv_need_space(global_rsv);
4159
4160         /*
4161          * in limited mode, we want to keep some free space available, up
4162          * to about 1% of the FS size.
4163          */
4164         if (force == CHUNK_ALLOC_LIMITED) {
4165                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4166                 thresh = max_t(u64, 64 * 1024 * 1024,
4167                                div_factor_fine(thresh, 1));
4168
4169                 if (num_bytes - num_allocated < thresh)
4170                         return 1;
4171         }
4172
4173         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4174                 return 0;
4175         return 1;
4176 }
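/*
 * Editor's illustration: a user-space sketch of the two thresholds used by
 * should_alloc_chunk() above.  It assumes div_factor_fine(x, 1) is x * 1 / 100
 * and div_factor(x, 8) is x * 8 / 10, and ignores the global rsv adjustment
 * and readonly bytes for brevity.  For a 1 TiB filesystem the LIMITED
 * threshold is about 10 GiB (1%), never less than 64 MiB.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_should_alloc(uint64_t total, uint64_t allocated,
                                 bool limited)
{
        if (limited) {
                uint64_t thresh = total / 100;          /* ~1% of the FS */

                if (thresh < 64ULL * 1024 * 1024)
                        thresh = 64ULL * 1024 * 1024;   /* 64 MiB floor  */
                if (total - allocated < thresh)
                        return true;
        }
        /* default rule: allocate once roughly 80% of the space is consumed */
        return allocated + 2ULL * 1024 * 1024 >= total * 8 / 10;
}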
4177
4178 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4179 {
4180         u64 num_dev;
4181
4182         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4183                     BTRFS_BLOCK_GROUP_RAID0 |
4184                     BTRFS_BLOCK_GROUP_RAID5 |
4185                     BTRFS_BLOCK_GROUP_RAID6))
4186                 num_dev = root->fs_info->fs_devices->rw_devices;
4187         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4188                 num_dev = 2;
4189         else
4190                 num_dev = 1;    /* DUP or single */
4191
4192         return num_dev;
4193 }
4194
4195 /*
4196  * Reserve space in the system space info for updating the device items and
4197  * the chunk item when allocating or removing a chunk of the given @type.
4198  * The caller must hold fs_info->chunk_mutex.
4199  */
4200 void check_system_chunk(struct btrfs_trans_handle *trans,
4201                         struct btrfs_root *root,
4202                         u64 type)
4203 {
4204         struct btrfs_space_info *info;
4205         u64 left;
4206         u64 thresh;
4207         int ret = 0;
4208         u64 num_devs;
4209
4210         /*
4211          * Needed because we can end up allocating a system chunk and need
4212          * an atomic, race-free space reservation in the chunk block reserve.
4213          */
4214         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4215
4216         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4217         spin_lock(&info->lock);
4218         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4219                 info->bytes_reserved - info->bytes_readonly -
4220                 info->bytes_may_use;
4221         spin_unlock(&info->lock);
4222
4223         num_devs = get_profile_num_devs(root, type);
4224
4225         /* num_devs device items to update and 1 chunk item to add or remove */
4226         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4227                 btrfs_calc_trans_metadata_size(root, 1);
4228
4229         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4230                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4231                         left, thresh, type);
4232                 dump_space_info(info, 0, 0);
4233         }
4234
4235         if (left < thresh) {
4236                 u64 flags;
4237
4238                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4239                 /*
4240                  * Ignore failure to create system chunk. We might end up not
4241                  * needing it, as we might not need to COW all nodes/leaves from
4242                  * the paths we visit in the chunk tree (they were already COWed
4243                  * or created in the current transaction for example).
4244                  */
4245                 ret = btrfs_alloc_chunk(trans, root, flags);
4246         }
4247
4248         if (!ret) {
4249                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4250                                           &root->fs_info->chunk_block_rsv,
4251                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4252                 if (!ret)
4253                         trans->chunk_bytes_reserved += thresh;
4254         }
4255 }
4256
4257 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4258                           struct btrfs_root *extent_root, u64 flags, int force)
4259 {
4260         struct btrfs_space_info *space_info;
4261         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4262         int wait_for_alloc = 0;
4263         int ret = 0;
4264
4265         /* Don't re-enter if we're already allocating a chunk */
4266         if (trans->allocating_chunk)
4267                 return -ENOSPC;
4268
4269         space_info = __find_space_info(extent_root->fs_info, flags);
4270         if (!space_info) {
4271                 ret = update_space_info(extent_root->fs_info, flags,
4272                                         0, 0, &space_info);
4273                 BUG_ON(ret); /* -ENOMEM */
4274         }
4275         BUG_ON(!space_info); /* Logic error */
4276
4277 again:
4278         spin_lock(&space_info->lock);
4279         if (force < space_info->force_alloc)
4280                 force = space_info->force_alloc;
4281         if (space_info->full) {
4282                 if (should_alloc_chunk(extent_root, space_info, force))
4283                         ret = -ENOSPC;
4284                 else
4285                         ret = 0;
4286                 spin_unlock(&space_info->lock);
4287                 return ret;
4288         }
4289
4290         if (!should_alloc_chunk(extent_root, space_info, force)) {
4291                 spin_unlock(&space_info->lock);
4292                 return 0;
4293         } else if (space_info->chunk_alloc) {
4294                 wait_for_alloc = 1;
4295         } else {
4296                 space_info->chunk_alloc = 1;
4297         }
4298
4299         spin_unlock(&space_info->lock);
4300
4301         mutex_lock(&fs_info->chunk_mutex);
4302
4303         /*
4304          * The chunk_mutex is held throughout the entirety of a chunk
4305          * allocation, so once we've acquired the chunk_mutex we know that the
4306          * other guy is done and we need to recheck and see if we should
4307          * allocate.
4308          */
4309         if (wait_for_alloc) {
4310                 mutex_unlock(&fs_info->chunk_mutex);
4311                 wait_for_alloc = 0;
4312                 goto again;
4313         }
4314
4315         trans->allocating_chunk = true;
4316
4317         /*
4318          * If we have mixed data/metadata chunks we want to make sure we keep
4319          * allocating mixed chunks instead of individual chunks.
4320          */
4321         if (btrfs_mixed_space_info(space_info))
4322                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4323
4324         /*
4325          * if we're doing a data chunk, go ahead and make sure that
4326          * we keep a reasonable number of metadata chunks allocated in the
4327          * FS as well.
4328          */
4329         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4330                 fs_info->data_chunk_allocations++;
4331                 if (!(fs_info->data_chunk_allocations %
4332                       fs_info->metadata_ratio))
4333                         force_metadata_allocation(fs_info);
4334         }
4335
4336         /*
4337          * Check if we have enough space in SYSTEM chunk because we may need
4338          * to update devices.
4339          */
4340         check_system_chunk(trans, extent_root, flags);
4341
4342         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4343         trans->allocating_chunk = false;
4344
4345         spin_lock(&space_info->lock);
4346         if (ret < 0 && ret != -ENOSPC)
4347                 goto out;
4348         if (ret)
4349                 space_info->full = 1;
4350         else
4351                 ret = 1;
4352
4353         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4354 out:
4355         space_info->chunk_alloc = 0;
4356         spin_unlock(&space_info->lock);
4357         mutex_unlock(&fs_info->chunk_mutex);
4358         /*
4359          * When we allocate a new chunk we reserve space in the chunk block
4360          * reserve to make sure we can COW nodes/leaves in the chunk tree or
4361          * add new nodes/leaves to it if we end up needing to do it when
4362          * inserting the chunk item and updating device items as part of the
4363          * second phase of chunk allocation, performed by
4364          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4365          * large number of new block groups to create in our transaction
4366          * handle's new_bgs list to avoid exhausting the chunk block reserve
4367          * in extreme cases - like having a single transaction create many new
4368          * block groups when starting to write out the free space caches of all
4369          * the block groups that were made dirty during the lifetime of the
4370          * transaction.
4371          */
4372         if (trans->can_flush_pending_bgs &&
4373             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4374                 btrfs_create_pending_block_groups(trans, trans->root);
4375                 btrfs_trans_release_chunk_metadata(trans);
4376         }
4377         return ret;
4378 }
4379
4380 static int can_overcommit(struct btrfs_root *root,
4381                           struct btrfs_space_info *space_info, u64 bytes,
4382                           enum btrfs_reserve_flush_enum flush)
4383 {
4384         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4385         u64 profile = btrfs_get_alloc_profile(root, 0);
4386         u64 space_size;
4387         u64 avail;
4388         u64 used;
4389
4390         used = space_info->bytes_used + space_info->bytes_reserved +
4391                 space_info->bytes_pinned + space_info->bytes_readonly;
4392
4393         /*
4394          * We only want to allow overcommitting if we have lots of actual space
4395          * free, but if we don't have enough space to handle the global reserve
4396          * space then we could end up having a real enospc problem when trying
4397          * to allocate a chunk or some other such important allocation.
4398          */
4399         spin_lock(&global_rsv->lock);
4400         space_size = calc_global_rsv_need_space(global_rsv);
4401         spin_unlock(&global_rsv->lock);
4402         if (used + space_size >= space_info->total_bytes)
4403                 return 0;
4404
4405         used += space_info->bytes_may_use;
4406
4407         spin_lock(&root->fs_info->free_chunk_lock);
4408         avail = root->fs_info->free_chunk_space;
4409         spin_unlock(&root->fs_info->free_chunk_lock);
4410
4411         /*
4412          * If we have dup, raid1 or raid10 then only half of the free
4413          * space is actually usable.  For raid56, the space info used
4414          * doesn't include the parity drive, so we don't have to
4415          * change the math
4416          */
4417         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4418                        BTRFS_BLOCK_GROUP_RAID1 |
4419                        BTRFS_BLOCK_GROUP_RAID10))
4420                 avail >>= 1;
4421
4422         /*
4423          * If we aren't allowed to flush all things, let us overcommit up
4424          * to half of the space. If we can flush, don't let us overcommit
4425          * too much; only allow overcommitting up to 1/8 of the space.
4426          */
4427         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4428                 avail >>= 3;
4429         else
4430                 avail >>= 1;
4431
4432         if (used + bytes < space_info->total_bytes + avail)
4433                 return 1;
4434         return 0;
4435 }
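/*
 * Editor's illustration: a user-space sketch of the overcommit decision made
 * by can_overcommit() above, once the global reserve check has passed.  Names
 * are hypothetical; "mirrored_profile" stands for DUP/RAID1/RAID10.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_can_overcommit(uint64_t used, uint64_t total,
                                   uint64_t free_chunk_space, uint64_t bytes,
                                   bool mirrored_profile, bool flush_all)
{
        uint64_t avail = free_chunk_space;

        if (mirrored_profile)
                avail >>= 1;                 /* only half is really usable  */
        avail >>= flush_all ? 3 : 1;         /* stricter when we can flush  */

        return used + bytes < total + avail;
}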
4436
4437 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4438                                          unsigned long nr_pages, int nr_items)
4439 {
4440         struct super_block *sb = root->fs_info->sb;
4441
4442         if (down_read_trylock(&sb->s_umount)) {
4443                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4444                 up_read(&sb->s_umount);
4445         } else {
4446                 /*
4447                  * We needn't worry about the filesystem going from r/w to r/o
4448                  * even though we don't acquire the ->s_umount mutex, because
4449                  * the filesystem should guarantee that the delalloc inode list
4450                  * is empty after the filesystem becomes read-only (all dirty
4451                  * pages have been written to disk).
4452                  */
4453                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4454                 if (!current->journal_info)
4455                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4456         }
4457 }
4458
4459 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4460 {
4461         u64 bytes;
4462         int nr;
4463
4464         bytes = btrfs_calc_trans_metadata_size(root, 1);
4465         nr = (int)div64_u64(to_reclaim, bytes);
4466         if (!nr)
4467                 nr = 1;
4468         return nr;
4469 }
4470
4471 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4472
4473 /*
4474  * shrink metadata reservation for delalloc
4475  */
4476 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4477                             bool wait_ordered)
4478 {
4479         struct btrfs_block_rsv *block_rsv;
4480         struct btrfs_space_info *space_info;
4481         struct btrfs_trans_handle *trans;
4482         u64 delalloc_bytes;
4483         u64 max_reclaim;
4484         long time_left;
4485         unsigned long nr_pages;
4486         int loops;
4487         int items;
4488         enum btrfs_reserve_flush_enum flush;
4489
4490         /* Calculate the number of pages we need to flush for this reservation */
4491         items = calc_reclaim_items_nr(root, to_reclaim);
4492         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4493
4494         trans = (struct btrfs_trans_handle *)current->journal_info;
4495         block_rsv = &root->fs_info->delalloc_block_rsv;
4496         space_info = block_rsv->space_info;
4497
4498         delalloc_bytes = percpu_counter_sum_positive(
4499                                                 &root->fs_info->delalloc_bytes);
4500         if (delalloc_bytes == 0) {
4501                 if (trans)
4502                         return;
4503                 if (wait_ordered)
4504                         btrfs_wait_ordered_roots(root->fs_info, items);
4505                 return;
4506         }
4507
4508         loops = 0;
4509         while (delalloc_bytes && loops < 3) {
4510                 max_reclaim = min(delalloc_bytes, to_reclaim);
4511                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4512                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4513                 /*
4514                  * We need to wait for the async pages to actually start before
4515                  * we do anything.
4516                  */
4517                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4518                 if (!max_reclaim)
4519                         goto skip_async;
4520
4521                 if (max_reclaim <= nr_pages)
4522                         max_reclaim = 0;
4523                 else
4524                         max_reclaim -= nr_pages;
4525
4526                 wait_event(root->fs_info->async_submit_wait,
4527                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4528                            (int)max_reclaim);
4529 skip_async:
4530                 if (!trans)
4531                         flush = BTRFS_RESERVE_FLUSH_ALL;
4532                 else
4533                         flush = BTRFS_RESERVE_NO_FLUSH;
4534                 spin_lock(&space_info->lock);
4535                 if (can_overcommit(root, space_info, orig, flush)) {
4536                         spin_unlock(&space_info->lock);
4537                         break;
4538                 }
4539                 spin_unlock(&space_info->lock);
4540
4541                 loops++;
4542                 if (wait_ordered && !trans) {
4543                         btrfs_wait_ordered_roots(root->fs_info, items);
4544                 } else {
4545                         time_left = schedule_timeout_killable(1);
4546                         if (time_left)
4547                                 break;
4548                 }
4549                 delalloc_bytes = percpu_counter_sum_positive(
4550                                                 &root->fs_info->delalloc_bytes);
4551         }
4552 }
4553
4554 /**
4555  * may_commit_transaction - possibly commit the transaction if it's OK to
4556  * @root - the root we're allocating for
4557  * @bytes - the number of bytes we want to reserve
4558  * @force - force the commit
4559  *
4560  * This will check to make sure that committing the transaction will actually
4561  * get us somewhere and then commit the transaction if it does.  Otherwise it
4562  * will return -ENOSPC.
4563  */
4564 static int may_commit_transaction(struct btrfs_root *root,
4565                                   struct btrfs_space_info *space_info,
4566                                   u64 bytes, int force)
4567 {
4568         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4569         struct btrfs_trans_handle *trans;
4570
4571         trans = (struct btrfs_trans_handle *)current->journal_info;
4572         if (trans)
4573                 return -EAGAIN;
4574
4575         if (force)
4576                 goto commit;
4577
4578         /* See if there is enough pinned space to make this reservation */
4579         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4580                                    bytes) >= 0)
4581                 goto commit;
4582
4583         /*
4584          * See if there is some space in the delayed insertion reservation for
4585          * this reservation.
4586          */
4587         if (space_info != delayed_rsv->space_info)
4588                 return -ENOSPC;
4589
4590         spin_lock(&delayed_rsv->lock);
4591         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4592                                    bytes - delayed_rsv->size) >= 0) {
4593                 spin_unlock(&delayed_rsv->lock);
4594                 return -ENOSPC;
4595         }
4596         spin_unlock(&delayed_rsv->lock);
4597
4598 commit:
4599         trans = btrfs_join_transaction(root);
4600         if (IS_ERR(trans))
4601                 return -ENOSPC;
4602
4603         return btrfs_commit_transaction(trans, root);
4604 }
4605
4606 enum flush_state {
4607         FLUSH_DELAYED_ITEMS_NR  =       1,
4608         FLUSH_DELAYED_ITEMS     =       2,
4609         FLUSH_DELALLOC          =       3,
4610         FLUSH_DELALLOC_WAIT     =       4,
4611         ALLOC_CHUNK             =       5,
4612         COMMIT_TRANS            =       6,
4613 };
4614
4615 static int flush_space(struct btrfs_root *root,
4616                        struct btrfs_space_info *space_info, u64 num_bytes,
4617                        u64 orig_bytes, int state)
4618 {
4619         struct btrfs_trans_handle *trans;
4620         int nr;
4621         int ret = 0;
4622
4623         switch (state) {
4624         case FLUSH_DELAYED_ITEMS_NR:
4625         case FLUSH_DELAYED_ITEMS:
4626                 if (state == FLUSH_DELAYED_ITEMS_NR)
4627                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4628                 else
4629                         nr = -1;
4630
4631                 trans = btrfs_join_transaction(root);
4632                 if (IS_ERR(trans)) {
4633                         ret = PTR_ERR(trans);
4634                         break;
4635                 }
4636                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4637                 btrfs_end_transaction(trans, root);
4638                 break;
4639         case FLUSH_DELALLOC:
4640         case FLUSH_DELALLOC_WAIT:
4641                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4642                                 state == FLUSH_DELALLOC_WAIT);
4643                 break;
4644         case ALLOC_CHUNK:
4645                 trans = btrfs_join_transaction(root);
4646                 if (IS_ERR(trans)) {
4647                         ret = PTR_ERR(trans);
4648                         break;
4649                 }
4650                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4651                                      btrfs_get_alloc_profile(root, 0),
4652                                      CHUNK_ALLOC_NO_FORCE);
4653                 btrfs_end_transaction(trans, root);
4654                 if (ret == -ENOSPC)
4655                         ret = 0;
4656                 break;
4657         case COMMIT_TRANS:
4658                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4659                 break;
4660         default:
4661                 ret = -ENOSPC;
4662                 break;
4663         }
4664
4665         return ret;
4666 }
4667
4668 static inline u64
4669 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4670                                  struct btrfs_space_info *space_info)
4671 {
4672         u64 used;
4673         u64 expected;
4674         u64 to_reclaim;
4675
4676         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4677                                 16 * 1024 * 1024);
4678         spin_lock(&space_info->lock);
4679         if (can_overcommit(root, space_info, to_reclaim,
4680                            BTRFS_RESERVE_FLUSH_ALL)) {
4681                 to_reclaim = 0;
4682                 goto out;
4683         }
4684
4685         used = space_info->bytes_used + space_info->bytes_reserved +
4686                space_info->bytes_pinned + space_info->bytes_readonly +
4687                space_info->bytes_may_use;
4688         if (can_overcommit(root, space_info, 1024 * 1024,
4689                            BTRFS_RESERVE_FLUSH_ALL))
4690                 expected = div_factor_fine(space_info->total_bytes, 95);
4691         else
4692                 expected = div_factor_fine(space_info->total_bytes, 90);
4693
4694         if (used > expected)
4695                 to_reclaim = used - expected;
4696         else
4697                 to_reclaim = 0;
4698         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4699                                      space_info->bytes_reserved);
4700 out:
4701         spin_unlock(&space_info->lock);
4702
4703         return to_reclaim;
4704 }
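/*
 * Editor's illustration: a user-space sketch of the reclaim target picked by
 * btrfs_calc_reclaim_metadata_size() above once overcommitting is no longer
 * possible: bring usage back under ~90% or ~95% of the space, but never try
 * to reclaim more than is actually reserved or may_use.  div_factor_fine(x, n)
 * is assumed to be x * n / 100.
 */
#include <stdint.h>

static uint64_t example_reclaim_target(uint64_t used, uint64_t total,
                                       uint64_t reclaimable, int pct)
{
        uint64_t expected = total * pct / 100;   /* pct is 90 or 95 */
        uint64_t to_reclaim = used > expected ? used - expected : 0;

        return to_reclaim < reclaimable ? to_reclaim : reclaimable;
}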
4705
4706 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4707                                         struct btrfs_fs_info *fs_info, u64 used)
4708 {
4709         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4710
4711         /* If we're just plain full then async reclaim just slows us down. */
4712         if (space_info->bytes_used >= thresh)
4713                 return 0;
4714
4715         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4716                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4717 }
4718
4719 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4720                                        struct btrfs_fs_info *fs_info,
4721                                        int flush_state)
4722 {
4723         u64 used;
4724
4725         spin_lock(&space_info->lock);
4726         /*
4727          * We ran out of space and have not been able to get any free space
4728          * via flush_space, so don't bother doing async reclaim.
4729          */
4730         if (flush_state > COMMIT_TRANS && space_info->full) {
4731                 spin_unlock(&space_info->lock);
4732                 return 0;
4733         }
4734
4735         used = space_info->bytes_used + space_info->bytes_reserved +
4736                space_info->bytes_pinned + space_info->bytes_readonly +
4737                space_info->bytes_may_use;
4738         if (need_do_async_reclaim(space_info, fs_info, used)) {
4739                 spin_unlock(&space_info->lock);
4740                 return 1;
4741         }
4742         spin_unlock(&space_info->lock);
4743
4744         return 0;
4745 }
4746
4747 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4748 {
4749         struct btrfs_fs_info *fs_info;
4750         struct btrfs_space_info *space_info;
4751         u64 to_reclaim;
4752         int flush_state;
4753
4754         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4755         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4756
4757         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4758                                                       space_info);
4759         if (!to_reclaim)
4760                 return;
4761
4762         flush_state = FLUSH_DELAYED_ITEMS_NR;
4763         do {
4764                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4765                             to_reclaim, flush_state);
4766                 flush_state++;
4767                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4768                                                  flush_state))
4769                         return;
4770         } while (flush_state < COMMIT_TRANS);
4771 }
4772
4773 void btrfs_init_async_reclaim_work(struct work_struct *work)
4774 {
4775         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4776 }
4777
4778 /**
4779  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4780  * @root - the root we're allocating for
4781  * @block_rsv - the block_rsv we're allocating for
4782  * @orig_bytes - the number of bytes we want
4783  * @flush - whether or not we can flush to make our reservation
4784  *
4785  * This will reserve orig_bytes from the space info associated with the
4786  * block_rsv.  If there is not enough space it will make an attempt to flush
4787  * out space to make room.  It will do this by flushing delalloc if possible
4788  * or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH then no
4789  * attempts to regain reservations will be made and this will fail if there
4790  * is not enough space already.
4791  */
4792 static int reserve_metadata_bytes(struct btrfs_root *root,
4793                                   struct btrfs_block_rsv *block_rsv,
4794                                   u64 orig_bytes,
4795                                   enum btrfs_reserve_flush_enum flush)
4796 {
4797         struct btrfs_space_info *space_info = block_rsv->space_info;
4798         u64 used;
4799         u64 num_bytes = orig_bytes;
4800         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4801         int ret = 0;
4802         bool flushing = false;
4803
4804 again:
4805         ret = 0;
4806         spin_lock(&space_info->lock);
4807         /*
4808          * We only want to wait if somebody other than us is flushing and we
4809          * are actually allowed to flush all things.
4810          */
4811         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4812                space_info->flush) {
4813                 spin_unlock(&space_info->lock);
4814                 /*
4815                  * If we have a trans handle we can't wait because the flusher
4816                  * may have to commit the transaction, which would mean we would
4817                  * deadlock since we are waiting for the flusher to finish, but
4818                  * hold the current transaction open.
4819                  */
4820                 if (current->journal_info)
4821                         return -EAGAIN;
4822                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4823                 /* Must have been killed, return */
4824                 if (ret)
4825                         return -EINTR;
4826
4827                 spin_lock(&space_info->lock);
4828         }
4829
4830         ret = -ENOSPC;
4831         used = space_info->bytes_used + space_info->bytes_reserved +
4832                 space_info->bytes_pinned + space_info->bytes_readonly +
4833                 space_info->bytes_may_use;
4834
4835         /*
4836          * The idea here is that if we've not already over-reserved the block
4837          * group then we can go ahead and save our reservation first and then
4838          * start flushing if we need to.  Otherwise if we've already
4839          * overcommitted, let's start flushing stuff first and then come back
4840          * and try to make our reservation.
4841          */
4842         if (used <= space_info->total_bytes) {
4843                 if (used + orig_bytes <= space_info->total_bytes) {
4844                         space_info->bytes_may_use += orig_bytes;
4845                         trace_btrfs_space_reservation(root->fs_info,
4846                                 "space_info", space_info->flags, orig_bytes, 1);
4847                         ret = 0;
4848                 } else {
4849                         /*
4850                          * Ok set num_bytes to orig_bytes since we aren't
4851                          * overcommitted; this way we only try and reclaim what
4852                          * we need.
4853                          */
4854                         num_bytes = orig_bytes;
4855                 }
4856         } else {
4857                 /*
4858                  * Ok we're overcommitted, set num_bytes to the overcommitted
4859                  * amount plus the amount of bytes that we need for this
4860                  * reservation.
4861                  */
4862                 num_bytes = used - space_info->total_bytes +
4863                         (orig_bytes * 2);
4864         }
4865
4866         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4867                 space_info->bytes_may_use += orig_bytes;
4868                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4869                                               space_info->flags, orig_bytes,
4870                                               1);
4871                 ret = 0;
4872         }
4873
4874         /*
4875          * Couldn't make our reservation; save our place so that while we're
4876          * trying to reclaim space we can actually use it instead of somebody
4877          * else stealing it from us.
4878          *
4879          * We make the other tasks wait for the flush only when we can flush
4880          * all things.
4881          */
4882         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4883                 flushing = true;
4884                 space_info->flush = 1;
4885         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4886                 used += orig_bytes;
4887                 /*
4888                  * We will do the space reservation dance during log replay,
4889                  * which means we won't have fs_info->fs_root set, so don't do
4890                  * the async reclaim as we will panic.
4891                  */
4892                 if (!root->fs_info->log_root_recovering &&
4893                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4894                     !work_busy(&root->fs_info->async_reclaim_work))
4895                         queue_work(system_unbound_wq,
4896                                    &root->fs_info->async_reclaim_work);
4897         }
4898         spin_unlock(&space_info->lock);
4899
4900         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4901                 goto out;
4902
4903         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4904                           flush_state);
4905         flush_state++;
4906
4907         /*
4908          * If we are in FLUSH_LIMIT mode, we can not flush delalloc, or a
4909          * deadlock could happen. So skip the delalloc flush.
4910          */
4911         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4912             (flush_state == FLUSH_DELALLOC ||
4913              flush_state == FLUSH_DELALLOC_WAIT))
4914                 flush_state = ALLOC_CHUNK;
4915
4916         if (!ret)
4917                 goto again;
4918         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4919                  flush_state < COMMIT_TRANS)
4920                 goto again;
4921         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4922                  flush_state <= COMMIT_TRANS)
4923                 goto again;
4924
4925 out:
4926         if (ret == -ENOSPC &&
4927             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4928                 struct btrfs_block_rsv *global_rsv =
4929                         &root->fs_info->global_block_rsv;
4930
4931                 if (block_rsv != global_rsv &&
4932                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4933                         ret = 0;
4934         }
4935         if (ret == -ENOSPC)
4936                 trace_btrfs_space_reservation(root->fs_info,
4937                                               "space_info:enospc",
4938                                               space_info->flags, orig_bytes, 1);
4939         if (flushing) {
4940                 spin_lock(&space_info->lock);
4941                 space_info->flush = 0;
4942                 wake_up_all(&space_info->wait);
4943                 spin_unlock(&space_info->lock);
4944         }
4945         return ret;
4946 }
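/*
 * Editor's illustration: a user-space sketch of how much reserve_metadata_bytes()
 * above asks the flushing machinery to free up.  If the space_info is not yet
 * overcommitted we only chase the original request; otherwise we chase the
 * overcommitted amount plus twice the request.
 */
#include <stdint.h>

static uint64_t example_flush_goal(uint64_t used, uint64_t total,
                                   uint64_t orig_bytes)
{
        if (used <= total)
                return orig_bytes;
        return used - total + orig_bytes * 2;
}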
4947
4948 static struct btrfs_block_rsv *get_block_rsv(
4949                                         const struct btrfs_trans_handle *trans,
4950                                         const struct btrfs_root *root)
4951 {
4952         struct btrfs_block_rsv *block_rsv = NULL;
4953
4954         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4955             (root == root->fs_info->csum_root && trans->adding_csums) ||
4956              (root == root->fs_info->uuid_root))
4957                 block_rsv = trans->block_rsv;
4958
4959         if (!block_rsv)
4960                 block_rsv = root->block_rsv;
4961
4962         if (!block_rsv)
4963                 block_rsv = &root->fs_info->empty_block_rsv;
4964
4965         return block_rsv;
4966 }
4967
4968 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4969                                u64 num_bytes)
4970 {
4971         int ret = -ENOSPC;
4972         spin_lock(&block_rsv->lock);
4973         if (block_rsv->reserved >= num_bytes) {
4974                 block_rsv->reserved -= num_bytes;
4975                 if (block_rsv->reserved < block_rsv->size)
4976                         block_rsv->full = 0;
4977                 ret = 0;
4978         }
4979         spin_unlock(&block_rsv->lock);
4980         return ret;
4981 }
4982
4983 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4984                                 u64 num_bytes, int update_size)
4985 {
4986         spin_lock(&block_rsv->lock);
4987         block_rsv->reserved += num_bytes;
4988         if (update_size)
4989                 block_rsv->size += num_bytes;
4990         else if (block_rsv->reserved >= block_rsv->size)
4991                 block_rsv->full = 1;
4992         spin_unlock(&block_rsv->lock);
4993 }
4994
4995 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4996                              struct btrfs_block_rsv *dest, u64 num_bytes,
4997                              int min_factor)
4998 {
4999         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5000         u64 min_bytes;
5001
5002         if (global_rsv->space_info != dest->space_info)
5003                 return -ENOSPC;
5004
5005         spin_lock(&global_rsv->lock);
5006         min_bytes = div_factor(global_rsv->size, min_factor);
5007         if (global_rsv->reserved < min_bytes + num_bytes) {
5008                 spin_unlock(&global_rsv->lock);
5009                 return -ENOSPC;
5010         }
5011         global_rsv->reserved -= num_bytes;
5012         if (global_rsv->reserved < global_rsv->size)
5013                 global_rsv->full = 0;
5014         spin_unlock(&global_rsv->lock);
5015
5016         block_rsv_add_bytes(dest, num_bytes, 1);
5017         return 0;
5018 }
5019
5020 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5021                                     struct btrfs_block_rsv *block_rsv,
5022                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5023 {
5024         struct btrfs_space_info *space_info = block_rsv->space_info;
5025
5026         spin_lock(&block_rsv->lock);
5027         if (num_bytes == (u64)-1)
5028                 num_bytes = block_rsv->size;
5029         block_rsv->size -= num_bytes;
5030         if (block_rsv->reserved >= block_rsv->size) {
5031                 num_bytes = block_rsv->reserved - block_rsv->size;
5032                 block_rsv->reserved = block_rsv->size;
5033                 block_rsv->full = 1;
5034         } else {
5035                 num_bytes = 0;
5036         }
5037         spin_unlock(&block_rsv->lock);
5038
5039         if (num_bytes > 0) {
5040                 if (dest) {
5041                         spin_lock(&dest->lock);
5042                         if (!dest->full) {
5043                                 u64 bytes_to_add;
5044
5045                                 bytes_to_add = dest->size - dest->reserved;
5046                                 bytes_to_add = min(num_bytes, bytes_to_add);
5047                                 dest->reserved += bytes_to_add;
5048                                 if (dest->reserved >= dest->size)
5049                                         dest->full = 1;
5050                                 num_bytes -= bytes_to_add;
5051                         }
5052                         spin_unlock(&dest->lock);
5053                 }
5054                 if (num_bytes) {
5055                         spin_lock(&space_info->lock);
5056                         space_info->bytes_may_use -= num_bytes;
5057                         trace_btrfs_space_reservation(fs_info, "space_info",
5058                                         space_info->flags, num_bytes, 0);
5059                         spin_unlock(&space_info->lock);
5060                 }
5061         }
5062 }
5063
5064 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5065                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5066 {
5067         int ret;
5068
5069         ret = block_rsv_use_bytes(src, num_bytes);
5070         if (ret)
5071                 return ret;
5072
5073         block_rsv_add_bytes(dst, num_bytes, 1);
5074         return 0;
5075 }
5076
5077 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5078 {
5079         memset(rsv, 0, sizeof(*rsv));
5080         spin_lock_init(&rsv->lock);
5081         rsv->type = type;
5082 }
5083
5084 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5085                                               unsigned short type)
5086 {
5087         struct btrfs_block_rsv *block_rsv;
5088         struct btrfs_fs_info *fs_info = root->fs_info;
5089
5090         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5091         if (!block_rsv)
5092                 return NULL;
5093
5094         btrfs_init_block_rsv(block_rsv, type);
5095         block_rsv->space_info = __find_space_info(fs_info,
5096                                                   BTRFS_BLOCK_GROUP_METADATA);
5097         return block_rsv;
5098 }
5099
5100 void btrfs_free_block_rsv(struct btrfs_root *root,
5101                           struct btrfs_block_rsv *rsv)
5102 {
5103         if (!rsv)
5104                 return;
5105         btrfs_block_rsv_release(root, rsv, (u64)-1);
5106         kfree(rsv);
5107 }
5108
5109 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5110 {
5111         kfree(rsv);
5112 }
5113
5114 int btrfs_block_rsv_add(struct btrfs_root *root,
5115                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5116                         enum btrfs_reserve_flush_enum flush)
5117 {
5118         int ret;
5119
5120         if (num_bytes == 0)
5121                 return 0;
5122
5123         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5124         if (!ret) {
5125                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5126                 return 0;
5127         }
5128
5129         return ret;
5130 }
5131
5132 int btrfs_block_rsv_check(struct btrfs_root *root,
5133                           struct btrfs_block_rsv *block_rsv, int min_factor)
5134 {
5135         u64 num_bytes = 0;
5136         int ret = -ENOSPC;
5137
5138         if (!block_rsv)
5139                 return 0;
5140
5141         spin_lock(&block_rsv->lock);
5142         num_bytes = div_factor(block_rsv->size, min_factor);
5143         if (block_rsv->reserved >= num_bytes)
5144                 ret = 0;
5145         spin_unlock(&block_rsv->lock);
5146
5147         return ret;
5148 }
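/*
 * Editor's illustration: a user-space sketch of the min_factor test done by
 * btrfs_block_rsv_check() above, assuming div_factor(size, f) is size * f / 10.
 * A min_factor of 5, for instance, asks whether at least half of the rsv's
 * size is still reserved.
 */
#include <stdint.h>

static int example_rsv_check(uint64_t reserved, uint64_t size, int min_factor)
{
        return reserved >= size * (uint64_t)min_factor / 10 ? 0 : -1 /* -ENOSPC */;
}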
5149
5150 int btrfs_block_rsv_refill(struct btrfs_root *root,
5151                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5152                            enum btrfs_reserve_flush_enum flush)
5153 {
5154         u64 num_bytes = 0;
5155         int ret = -ENOSPC;
5156
5157         if (!block_rsv)
5158                 return 0;
5159
5160         spin_lock(&block_rsv->lock);
5161         num_bytes = min_reserved;
5162         if (block_rsv->reserved >= num_bytes)
5163                 ret = 0;
5164         else
5165                 num_bytes -= block_rsv->reserved;
5166         spin_unlock(&block_rsv->lock);
5167
5168         if (!ret)
5169                 return 0;
5170
5171         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5172         if (!ret) {
5173                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5174                 return 0;
5175         }
5176
5177         return ret;
5178 }
5179
5180 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5181                             struct btrfs_block_rsv *dst_rsv,
5182                             u64 num_bytes)
5183 {
5184         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5185 }
5186
5187 void btrfs_block_rsv_release(struct btrfs_root *root,
5188                              struct btrfs_block_rsv *block_rsv,
5189                              u64 num_bytes)
5190 {
5191         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5192         if (global_rsv == block_rsv ||
5193             block_rsv->space_info != global_rsv->space_info)
5194                 global_rsv = NULL;
5195         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5196                                 num_bytes);
5197 }
5198
5199 /*
5200  * helper to calculate size of global block reservation.
5201  * helper to calculate the size of the global block reservation.
5202  * the desired value is the sum of the space used by the extent tree,
5203  */
5204 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5205 {
5206         struct btrfs_space_info *sinfo;
5207         u64 num_bytes;
5208         u64 meta_used;
5209         u64 data_used;
5210         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5211
5212         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5213         spin_lock(&sinfo->lock);
5214         data_used = sinfo->bytes_used;
5215         spin_unlock(&sinfo->lock);
5216
5217         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5218         spin_lock(&sinfo->lock);
5219         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5220                 data_used = 0;
5221         meta_used = sinfo->bytes_used;
5222         spin_unlock(&sinfo->lock);
5223
5224         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5225                     csum_size * 2;
5226         num_bytes += div_u64(data_used + meta_used, 50);
5227
5228         if (num_bytes * 3 > meta_used)
5229                 num_bytes = div_u64(meta_used, 3);
5230
5231         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5232 }
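/*
 * Editor's illustration: a user-space sketch of the sizing rule applied by
 * calc_global_metadata_size() above: two checksum copies per data block plus
 * roughly 2% of all used space, capped at a third of the metadata in use.
 * The final alignment to nodesize << 10 is omitted for brevity.
 */
#include <stdint.h>

static uint64_t example_global_rsv_size(uint64_t data_used, uint64_t meta_used,
                                        uint64_t blocksize, uint32_t csum_size)
{
        uint64_t bytes = (data_used / blocksize) * csum_size * 2;

        bytes += (data_used + meta_used) / 50;   /* ~2% of used space */
        if (bytes * 3 > meta_used)
                bytes = meta_used / 3;
        return bytes;
}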
5233
5234 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5235 {
5236         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5237         struct btrfs_space_info *sinfo = block_rsv->space_info;
5238         u64 num_bytes;
5239
5240         num_bytes = calc_global_metadata_size(fs_info);
5241
5242         spin_lock(&sinfo->lock);
5243         spin_lock(&block_rsv->lock);
5244
5245         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5246
5247         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5248                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5249                     sinfo->bytes_may_use;
5250
5251         if (sinfo->total_bytes > num_bytes) {
5252                 num_bytes = sinfo->total_bytes - num_bytes;
5253                 block_rsv->reserved += num_bytes;
5254                 sinfo->bytes_may_use += num_bytes;
5255                 trace_btrfs_space_reservation(fs_info, "space_info",
5256                                       sinfo->flags, num_bytes, 1);
5257         }
5258
5259         if (block_rsv->reserved >= block_rsv->size) {
5260                 num_bytes = block_rsv->reserved - block_rsv->size;
5261                 sinfo->bytes_may_use -= num_bytes;
5262                 trace_btrfs_space_reservation(fs_info, "space_info",
5263                                       sinfo->flags, num_bytes, 0);
5264                 block_rsv->reserved = block_rsv->size;
5265                 block_rsv->full = 1;
5266         }
5267
5268         spin_unlock(&block_rsv->lock);
5269         spin_unlock(&sinfo->lock);
5270 }
5271
5272 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5273 {
5274         struct btrfs_space_info *space_info;
5275
5276         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5277         fs_info->chunk_block_rsv.space_info = space_info;
5278
5279         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5280         fs_info->global_block_rsv.space_info = space_info;
5281         fs_info->delalloc_block_rsv.space_info = space_info;
5282         fs_info->trans_block_rsv.space_info = space_info;
5283         fs_info->empty_block_rsv.space_info = space_info;
5284         fs_info->delayed_block_rsv.space_info = space_info;
5285
5286         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5287         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5288         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5289         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5290         if (fs_info->quota_root)
5291                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5292         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5293
5294         update_global_block_rsv(fs_info);
5295 }
5296
5297 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5298 {
5299         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5300                                 (u64)-1);
5301         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5302         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5303         WARN_ON(fs_info->trans_block_rsv.size > 0);
5304         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5305         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5306         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5307         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5308         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5309 }
5310
5311 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5312                                   struct btrfs_root *root)
5313 {
5314         if (!trans->block_rsv)
5315                 return;
5316
5317         if (!trans->bytes_reserved)
5318                 return;
5319
5320         trace_btrfs_space_reservation(root->fs_info, "transaction",
5321                                       trans->transid, trans->bytes_reserved, 0);
5322         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5323         trans->bytes_reserved = 0;
5324 }
5325
5326 /*
5327  * To be called after all the new block groups attached to the transaction
5328  * handle have been created (btrfs_create_pending_block_groups()).
5329  */
5330 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5331 {
5332         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5333
5334         if (!trans->chunk_bytes_reserved)
5335                 return;
5336
5337         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5338
5339         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5340                                 trans->chunk_bytes_reserved);
5341         trans->chunk_bytes_reserved = 0;
5342 }
5343
5344 /* Can only return 0 or -ENOSPC */
5345 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5346                                   struct inode *inode)
5347 {
5348         struct btrfs_root *root = BTRFS_I(inode)->root;
5349         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5350         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5351
5352         /*
5353          * We need to hold space in order to delete our orphan item once we've
5354          * added it, so this takes the reservation now and releases it later,
5355          * when we are truly done with the orphan item.
5356          */
5357         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5358         trace_btrfs_space_reservation(root->fs_info, "orphan",
5359                                       btrfs_ino(inode), num_bytes, 1);
5360         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5361 }
5362
5363 void btrfs_orphan_release_metadata(struct inode *inode)
5364 {
5365         struct btrfs_root *root = BTRFS_I(inode)->root;
5366         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5367         trace_btrfs_space_reservation(root->fs_info, "orphan",
5368                                       btrfs_ino(inode), num_bytes, 0);
5369         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5370 }
5371
5372 /*
5373  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5374  * root: the root of the parent directory
5375  * rsv: block reservation
5376  * items: the number of items that we need do reservation
5377  * items: the number of items that we need to reserve space for
5378  * qgroup_reserved: used to return the reserved size in qgroup
5379  *
5380  * This function is used to reserve the space for snapshot/subvolume
5381  * creation and deletion. Those operations are different from the
5382  * common file/directory operations: they change two fs/file trees
5383  * and the root tree, and the number of items that the qgroup reserves
5384  * is different from the free space reservation. So we can not use
5385  * the space reservation mechanism in start_transaction().
5386 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5387                                      struct btrfs_block_rsv *rsv,
5388                                      int items,
5389                                      u64 *qgroup_reserved,
5390                                      bool use_global_rsv)
5391 {
5392         u64 num_bytes;
5393         int ret;
5394         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5395
5396         if (root->fs_info->quota_enabled) {
5397                 /* One for parent inode, two for dir entries */
5398                 num_bytes = 3 * root->nodesize;
5399                 ret = btrfs_qgroup_reserve(root, num_bytes);
5400                 if (ret)
5401                         return ret;
5402         } else {
5403                 num_bytes = 0;
5404         }
5405
5406         *qgroup_reserved = num_bytes;
5407
5408         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5409         rsv->space_info = __find_space_info(root->fs_info,
5410                                             BTRFS_BLOCK_GROUP_METADATA);
5411         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5412                                   BTRFS_RESERVE_FLUSH_ALL);
5413
5414         if (ret == -ENOSPC && use_global_rsv)
5415                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5416
5417         if (ret) {
5418                 if (*qgroup_reserved)
5419                         btrfs_qgroup_free(root, *qgroup_reserved);
5420         }
5421
5422         return ret;
5423 }
5424
5425 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5426                                       struct btrfs_block_rsv *rsv,
5427                                       u64 qgroup_reserved)
5428 {
5429         btrfs_block_rsv_release(root, rsv, (u64)-1);
5430 }
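
/*
 * A minimal usage sketch for the two subvolume reservation helpers above,
 * assuming the caller supplies an initialized temporary block_rsv.  The
 * example_* name and the item count are illustrative only.
 */
static inline int example_subvol_rsv_sketch(struct btrfs_root *root,
                                            struct btrfs_block_rsv *rsv)
{
        u64 qgroup_reserved = 0;
        int ret;

        /* reserve metadata for a handful of fs/root tree items */
        ret = btrfs_subvolume_reserve_metadata(root, rsv, 8,
                                               &qgroup_reserved, false);
        if (ret)
                return ret;

        /* ... create or delete the snapshot/subvolume items here ... */

        btrfs_subvolume_release_metadata(root, rsv, qgroup_reserved);
        return 0;
}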
5431
5432 /**
5433  * drop_outstanding_extent - drop an outstanding extent
5434  * @inode: the inode we're dropping the extent for
5435  * @num_bytes: the number of bytes we're releasing.
5436  *
5437  * This is called when we are freeing up an outstanding extent, either
5438  * after an error or after an extent is written.  This will return the number of
5439  * reserved extents that need to be freed.  This must be called with
5440  * BTRFS_I(inode)->lock held.
5441  */
5442 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5443 {
5444         unsigned drop_inode_space = 0;
5445         unsigned dropped_extents = 0;
5446         unsigned num_extents = 0;
5447
5448         num_extents = (unsigned)div64_u64(num_bytes +
5449                                           BTRFS_MAX_EXTENT_SIZE - 1,
5450                                           BTRFS_MAX_EXTENT_SIZE);
5451         ASSERT(num_extents);
5452         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5453         BTRFS_I(inode)->outstanding_extents -= num_extents;
5454
5455         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5456             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5457                                &BTRFS_I(inode)->runtime_flags))
5458                 drop_inode_space = 1;
5459
5460         /*
5461          * If we have at least as many outstanding extents as we have
5462          * reserved then we need to leave the reserved extents count alone.
5463          */
5464         if (BTRFS_I(inode)->outstanding_extents >=
5465             BTRFS_I(inode)->reserved_extents)
5466                 return drop_inode_space;
5467
5468         dropped_extents = BTRFS_I(inode)->reserved_extents -
5469                 BTRFS_I(inode)->outstanding_extents;
5470         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5471         return dropped_extents + drop_inode_space;
5472 }
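
/*
 * The extent counting above is a round-up division by BTRFS_MAX_EXTENT_SIZE.
 * A standalone sketch of the same math, with a hypothetical helper name:
 */
static inline unsigned example_count_extents_sketch(u64 num_bytes)
{
        /* e.g. with a 128M max extent size, 128M + 1 bytes counts as 2 */
        return (unsigned)div64_u64(num_bytes + BTRFS_MAX_EXTENT_SIZE - 1,
                                   BTRFS_MAX_EXTENT_SIZE);
}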
5473
5474 /**
5475  * calc_csum_metadata_size - return the amount of metadata space that must be
5476  *      reserved/freed for the given bytes.
5477  * @inode: the inode we're manipulating
5478  * @num_bytes: the number of bytes in question
5479  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5480  *
5481  * This adjusts the number of csum_bytes in the inode and then returns the
5482  * correct amount of metadata that must either be reserved or freed.  We
5483  * calculate how many checksums we can fit into one leaf and then divide the
5484  * number of bytes that will need to be checksummed by this value to figure out
5485  * how many checksums will be required.  If we are adding bytes then the number
5486  * may go up and we will return the number of additional bytes that must be
5487  * reserved.  If it is going down we will return the number of bytes that must
5488  * be freed.
5489  *
5490  * This must be called with BTRFS_I(inode)->lock held.
5491  */
5492 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5493                                    int reserve)
5494 {
5495         struct btrfs_root *root = BTRFS_I(inode)->root;
5496         u64 old_csums, num_csums;
5497
5498         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5499             BTRFS_I(inode)->csum_bytes == 0)
5500                 return 0;
5501
5502         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5503         if (reserve)
5504                 BTRFS_I(inode)->csum_bytes += num_bytes;
5505         else
5506                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5507         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5508
5509         /* No change, no need to reserve more */
5510         if (old_csums == num_csums)
5511                 return 0;
5512
5513         if (reserve)
5514                 return btrfs_calc_trans_metadata_size(root,
5515                                                       num_csums - old_csums);
5516
5517         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5518 }
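
/*
 * A reservation-side sketch of the csum sizing above, with a hypothetical
 * helper name and the csum_bytes bookkeeping left to the caller: it returns
 * the extra metadata needed when num_bytes of data are added to an inode
 * that already has csum_bytes worth of checksummed data.
 */
static inline u64 example_csum_reserve_delta_sketch(struct btrfs_root *root,
                                                    u64 csum_bytes, u64 num_bytes)
{
        u64 old_leaves = btrfs_csum_bytes_to_leaves(root, csum_bytes);
        u64 new_leaves = btrfs_csum_bytes_to_leaves(root, csum_bytes + num_bytes);

        /* no new csum leaves means nothing extra to reserve */
        if (new_leaves == old_leaves)
                return 0;
        return btrfs_calc_trans_metadata_size(root, new_leaves - old_leaves);
}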
5519
5520 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5521 {
5522         struct btrfs_root *root = BTRFS_I(inode)->root;
5523         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5524         u64 to_reserve = 0;
5525         u64 csum_bytes;
5526         unsigned nr_extents = 0;
5527         int extra_reserve = 0;
5528         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5529         int ret = 0;
5530         bool delalloc_lock = true;
5531         u64 to_free = 0;
5532         unsigned dropped;
5533
5534         /* If we are a free space inode we need to not flush since we will be in
5535          * the middle of a transaction commit.  We also don't need the delalloc
5536          * mutex since we won't race with anybody.  We need this mostly to make
5537          * lockdep shut its filthy mouth.
5538          */
5539         if (btrfs_is_free_space_inode(inode)) {
5540                 flush = BTRFS_RESERVE_NO_FLUSH;
5541                 delalloc_lock = false;
5542         }
5543
5544         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5545             btrfs_transaction_in_commit(root->fs_info))
5546                 schedule_timeout(1);
5547
5548         if (delalloc_lock)
5549                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5550
5551         num_bytes = ALIGN(num_bytes, root->sectorsize);
5552
5553         spin_lock(&BTRFS_I(inode)->lock);
5554         nr_extents = (unsigned)div64_u64(num_bytes +
5555                                          BTRFS_MAX_EXTENT_SIZE - 1,
5556                                          BTRFS_MAX_EXTENT_SIZE);
5557         BTRFS_I(inode)->outstanding_extents += nr_extents;
5558         nr_extents = 0;
5559
5560         if (BTRFS_I(inode)->outstanding_extents >
5561             BTRFS_I(inode)->reserved_extents)
5562                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5563                         BTRFS_I(inode)->reserved_extents;
5564
5565         /*
5566          * Add an item to reserve for updating the inode when we complete the
5567          * delalloc io.
5568          */
5569         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5570                       &BTRFS_I(inode)->runtime_flags)) {
5571                 nr_extents++;
5572                 extra_reserve = 1;
5573         }
5574
5575         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5576         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5577         csum_bytes = BTRFS_I(inode)->csum_bytes;
5578         spin_unlock(&BTRFS_I(inode)->lock);
5579
5580         if (root->fs_info->quota_enabled) {
5581                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5582                 if (ret)
5583                         goto out_fail;
5584         }
5585
5586         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5587         if (unlikely(ret)) {
5588                 if (root->fs_info->quota_enabled)
5589                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5590                 goto out_fail;
5591         }
5592
5593         spin_lock(&BTRFS_I(inode)->lock);
5594         if (extra_reserve) {
5595                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5596                         &BTRFS_I(inode)->runtime_flags);
5597                 nr_extents--;
5598         }
5599         BTRFS_I(inode)->reserved_extents += nr_extents;
5600         spin_unlock(&BTRFS_I(inode)->lock);
5601
5602         if (delalloc_lock)
5603                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5604
5605         if (to_reserve)
5606                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5607                                               btrfs_ino(inode), to_reserve, 1);
5608         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5609
5610         return 0;
5611
5612 out_fail:
5613         spin_lock(&BTRFS_I(inode)->lock);
5614         dropped = drop_outstanding_extent(inode, num_bytes);
5615         /*
5616          * If the inode's csum_bytes is the same as the original
5617          * csum_bytes then we know we haven't raced with any free()ers
5618          * so we can just reduce our inode's csum bytes and carry on.
5619          */
5620         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5621                 calc_csum_metadata_size(inode, num_bytes, 0);
5622         } else {
5623                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5624                 u64 bytes;
5625
5626                 /*
5627                  * This is tricky, but first we need to figure out how much we
5628                  * freed from any free()ers that occurred during this
5629                  * reservation, so we reset ->csum_bytes to the csum_bytes
5630                  * before we dropped our lock, and then call the free for the
5631                  * number of bytes that were freed while we were trying our
5632                  * reservation.
5633                  */
5634                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5635                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5636                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5637
5639                 /*
5640                  * Now we need to see how much we would have freed had we not
5641                  * been making this reservation and our ->csum_bytes were not
5642                  * artificially inflated.
5643                  */
5644                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5645                 bytes = csum_bytes - orig_csum_bytes;
5646                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5647
5648                 /*
5649                  * Now reset ->csum_bytes to what it should be.  If bytes is
5650                  * more than to_free then we would have freed more space had we
5651                  * not had an artificially high ->csum_bytes, so we need to free
5652                  * the remainder.  If bytes is the same or less then we don't
5653                  * need to do anything, the other free()ers did the correct
5654                  * thing.
5655                  */
5656                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5657                 if (bytes > to_free)
5658                         to_free = bytes - to_free;
5659                 else
5660                         to_free = 0;
5661         }
5662         spin_unlock(&BTRFS_I(inode)->lock);
5663         if (dropped)
5664                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5665
5666         if (to_free) {
5667                 btrfs_block_rsv_release(root, block_rsv, to_free);
5668                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5669                                               btrfs_ino(inode), to_free, 0);
5670         }
5671         if (delalloc_lock)
5672                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5673         return ret;
5674 }
5675
5676 /**
5677  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5678  * @inode: the inode to release the reservation for
5679  * @num_bytes: the number of bytes we're releasing
5680  *
5681  * This will release the metadata reservation for an inode.  This can be called
5682  * once we complete IO for a given set of bytes to release their metadata
5683  * reservations.
5684  */
5685 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5686 {
5687         struct btrfs_root *root = BTRFS_I(inode)->root;
5688         u64 to_free = 0;
5689         unsigned dropped;
5690
5691         num_bytes = ALIGN(num_bytes, root->sectorsize);
5692         spin_lock(&BTRFS_I(inode)->lock);
5693         dropped = drop_outstanding_extent(inode, num_bytes);
5694
5695         if (num_bytes)
5696                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5697         spin_unlock(&BTRFS_I(inode)->lock);
5698         if (dropped > 0)
5699                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5700
5701         if (btrfs_test_is_dummy_root(root))
5702                 return;
5703
5704         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5705                                       btrfs_ino(inode), to_free, 0);
5706
5707         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5708                                 to_free);
5709 }
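
/*
 * A metadata-only pairing sketch, assuming the data space for the range is
 * reserved elsewhere; the example_* name is hypothetical.
 */
static inline int example_delalloc_meta_rsv_sketch(struct inode *inode, u64 len)
{
        int ret;

        ret = btrfs_delalloc_reserve_metadata(inode, len);
        if (ret)
                return ret;

        /* ... the IO for these len bytes completes (or is abandoned) ... */

        btrfs_delalloc_release_metadata(inode, len);
        return 0;
}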
5710
5711 /**
5712  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5713  * @inode: inode we're writing to
5714  * @num_bytes: the number of bytes we want to allocate
5715  *
5716  * This will do the following things
5717  *
5718  * o reserve space in the data space info for num_bytes
5719  * o reserve space in the metadata space info based on number of outstanding
5720  *   extents and how many csums will be needed
5721  * o add to the inode's ->delalloc_bytes
5722  * o add it to the fs_info's delalloc inodes list.
5723  *
5724  * This will return 0 for success and -ENOSPC if there is no space left.
5725  */
5726 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5727 {
5728         int ret;
5729
5730         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5731         if (ret)
5732                 return ret;
5733
5734         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5735         if (ret) {
5736                 btrfs_free_reserved_data_space(inode, num_bytes);
5737                 return ret;
5738         }
5739
5740         return 0;
5741 }
5742
5743 /**
5744  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5745  * @inode: inode we're releasing space for
5746  * @num_bytes: the number of bytes we want to free up
5747  *
5748  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5749  * called in the case that we don't need the metadata AND data reservations
5750  * anymore, for example if there is an error or we insert an inline extent.
5751  *
5752  * This function will release the metadata space that was not used and will
5753  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5754  * list if there are no delalloc bytes left.
5755  */
5756 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5757 {
5758         btrfs_delalloc_release_metadata(inode, num_bytes);
5759         btrfs_free_reserved_data_space(inode, num_bytes);
5760 }
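
/*
 * A combined data + metadata sketch matching the two helpers above,
 * assuming a hypothetical prepare step that can fail before any delalloc is
 * actually created; everything except the btrfs_delalloc_* calls is
 * illustrative.
 */
static inline int example_delalloc_rsv_sketch(struct inode *inode, u64 len,
                                              int (*prepare)(struct inode *, u64))
{
        int ret;

        ret = btrfs_delalloc_reserve_space(inode, len);
        if (ret)
                return ret;

        ret = prepare(inode, len);
        if (ret)
                /* nothing was dirtied, give back both reservations */
                btrfs_delalloc_release_space(inode, len);
        return ret;
}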
5761
5762 static int update_block_group(struct btrfs_trans_handle *trans,
5763                               struct btrfs_root *root, u64 bytenr,
5764                               u64 num_bytes, int alloc)
5765 {
5766         struct btrfs_block_group_cache *cache = NULL;
5767         struct btrfs_fs_info *info = root->fs_info;
5768         u64 total = num_bytes;
5769         u64 old_val;
5770         u64 byte_in_group;
5771         int factor;
5772
5773         /* block accounting for super block */
5774         spin_lock(&info->delalloc_root_lock);
5775         old_val = btrfs_super_bytes_used(info->super_copy);
5776         if (alloc)
5777                 old_val += num_bytes;
5778         else
5779                 old_val -= num_bytes;
5780         btrfs_set_super_bytes_used(info->super_copy, old_val);
5781         spin_unlock(&info->delalloc_root_lock);
5782
5783         while (total) {
5784                 cache = btrfs_lookup_block_group(info, bytenr);
5785                 if (!cache)
5786                         return -ENOENT;
5787                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5788                                     BTRFS_BLOCK_GROUP_RAID1 |
5789                                     BTRFS_BLOCK_GROUP_RAID10))
5790                         factor = 2;
5791                 else
5792                         factor = 1;
5793                 /*
5794                  * If this block group has free space cache written out, we
5795                  * need to make sure to load it if we are removing space.  This
5796                  * is because we need the unpinning stage to actually add the
5797                  * space back to the block group, otherwise we will leak space.
5798                  */
5799                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5800                         cache_block_group(cache, 1);
5801
5802                 byte_in_group = bytenr - cache->key.objectid;
5803                 WARN_ON(byte_in_group > cache->key.offset);
5804
5805                 spin_lock(&cache->space_info->lock);
5806                 spin_lock(&cache->lock);
5807
5808                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5809                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5810                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5811
5812                 old_val = btrfs_block_group_used(&cache->item);
5813                 num_bytes = min(total, cache->key.offset - byte_in_group);
5814                 if (alloc) {
5815                         old_val += num_bytes;
5816                         btrfs_set_block_group_used(&cache->item, old_val);
5817                         cache->reserved -= num_bytes;
5818                         cache->space_info->bytes_reserved -= num_bytes;
5819                         cache->space_info->bytes_used += num_bytes;
5820                         cache->space_info->disk_used += num_bytes * factor;
5821                         spin_unlock(&cache->lock);
5822                         spin_unlock(&cache->space_info->lock);
5823                 } else {
5824                         old_val -= num_bytes;
5825                         btrfs_set_block_group_used(&cache->item, old_val);
5826                         cache->pinned += num_bytes;
5827                         cache->space_info->bytes_pinned += num_bytes;
5828                         cache->space_info->bytes_used -= num_bytes;
5829                         cache->space_info->disk_used -= num_bytes * factor;
5830                         spin_unlock(&cache->lock);
5831                         spin_unlock(&cache->space_info->lock);
5832
5833                         set_extent_dirty(info->pinned_extents,
5834                                          bytenr, bytenr + num_bytes - 1,
5835                                          GFP_NOFS | __GFP_NOFAIL);
5836                         /*
5837                          * No longer have used bytes in this block group, queue
5838                          * it for deletion.
5839                          */
5840                         if (old_val == 0) {
5841                                 spin_lock(&info->unused_bgs_lock);
5842                                 if (list_empty(&cache->bg_list)) {
5843                                         btrfs_get_block_group(cache);
5844                                         list_add_tail(&cache->bg_list,
5845                                                       &info->unused_bgs);
5846                                 }
5847                                 spin_unlock(&info->unused_bgs_lock);
5848                         }
5849                 }
5850
5851                 spin_lock(&trans->transaction->dirty_bgs_lock);
5852                 if (list_empty(&cache->dirty_list)) {
5853                         list_add_tail(&cache->dirty_list,
5854                                       &trans->transaction->dirty_bgs);
5855                         trans->transaction->num_dirty_bgs++;
5856                         btrfs_get_block_group(cache);
5857                 }
5858                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5859
5860                 btrfs_put_block_group(cache);
5861                 total -= num_bytes;
5862                 bytenr += num_bytes;
5863         }
5864         return 0;
5865 }
5866
5867 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5868 {
5869         struct btrfs_block_group_cache *cache;
5870         u64 bytenr;
5871
5872         spin_lock(&root->fs_info->block_group_cache_lock);
5873         bytenr = root->fs_info->first_logical_byte;
5874         spin_unlock(&root->fs_info->block_group_cache_lock);
5875
5876         if (bytenr < (u64)-1)
5877                 return bytenr;
5878
5879         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5880         if (!cache)
5881                 return 0;
5882
5883         bytenr = cache->key.objectid;
5884         btrfs_put_block_group(cache);
5885
5886         return bytenr;
5887 }
5888
5889 static int pin_down_extent(struct btrfs_root *root,
5890                            struct btrfs_block_group_cache *cache,
5891                            u64 bytenr, u64 num_bytes, int reserved)
5892 {
5893         spin_lock(&cache->space_info->lock);
5894         spin_lock(&cache->lock);
5895         cache->pinned += num_bytes;
5896         cache->space_info->bytes_pinned += num_bytes;
5897         if (reserved) {
5898                 cache->reserved -= num_bytes;
5899                 cache->space_info->bytes_reserved -= num_bytes;
5900         }
5901         spin_unlock(&cache->lock);
5902         spin_unlock(&cache->space_info->lock);
5903
5904         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5905                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5906         if (reserved)
5907                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5908         return 0;
5909 }
5910
5911 /*
5912  * this function must be called within a transaction
5913  */
5914 int btrfs_pin_extent(struct btrfs_root *root,
5915                      u64 bytenr, u64 num_bytes, int reserved)
5916 {
5917         struct btrfs_block_group_cache *cache;
5918
5919         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5920         BUG_ON(!cache); /* Logic error */
5921
5922         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5923
5924         btrfs_put_block_group(cache);
5925         return 0;
5926 }
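
/*
 * A minimal sketch of pinning an extent whose space is already accounted as
 * reserved, from inside a running transaction; the example_* name is
 * hypothetical.
 */
static inline void example_pin_reserved_sketch(struct btrfs_root *root,
                                               u64 bytenr, u64 num_bytes)
{
        /* reserved=1 moves the bytes from reserved to pinned accounting */
        btrfs_pin_extent(root, bytenr, num_bytes, 1);
}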
5927
5928 /*
5929  * this function must be called within a transaction
5930  */
5931 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5932                                     u64 bytenr, u64 num_bytes)
5933 {
5934         struct btrfs_block_group_cache *cache;
5935         int ret;
5936
5937         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5938         if (!cache)
5939                 return -EINVAL;
5940
5941         /*
5942          * pull in the free space cache (if any) so that our pin
5943          * removes the free space from the cache.  We have load_only set
5944          * to one because the slow code to read in the free extents does check
5945          * the pinned extents.
5946          */
5947         cache_block_group(cache, 1);
5948
5949         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5950
5951         /* remove us from the free space cache (if we're there at all) */
5952         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5953         btrfs_put_block_group(cache);
5954         return ret;
5955 }
5956
5957 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5958 {
5959         int ret;
5960         struct btrfs_block_group_cache *block_group;
5961         struct btrfs_caching_control *caching_ctl;
5962
5963         block_group = btrfs_lookup_block_group(root->fs_info, start);
5964         if (!block_group)
5965                 return -EINVAL;
5966
5967         cache_block_group(block_group, 0);
5968         caching_ctl = get_caching_control(block_group);
5969
5970         if (!caching_ctl) {
5971                 /* Logic error */
5972                 BUG_ON(!block_group_cache_done(block_group));
5973                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5974         } else {
5975                 mutex_lock(&caching_ctl->mutex);
5976
5977                 if (start >= caching_ctl->progress) {
5978                         ret = add_excluded_extent(root, start, num_bytes);
5979                 } else if (start + num_bytes <= caching_ctl->progress) {
5980                         ret = btrfs_remove_free_space(block_group,
5981                                                       start, num_bytes);
5982                 } else {
5983                         num_bytes = caching_ctl->progress - start;
5984                         ret = btrfs_remove_free_space(block_group,
5985                                                       start, num_bytes);
5986                         if (ret)
5987                                 goto out_lock;
5988
5989                         num_bytes = (start + num_bytes) -
5990                                 caching_ctl->progress;
5991                         start = caching_ctl->progress;
5992                         ret = add_excluded_extent(root, start, num_bytes);
5993                 }
5994 out_lock:
5995                 mutex_unlock(&caching_ctl->mutex);
5996                 put_caching_control(caching_ctl);
5997         }
5998         btrfs_put_block_group(block_group);
5999         return ret;
6000 }
6001
6002 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6003                                  struct extent_buffer *eb)
6004 {
6005         struct btrfs_file_extent_item *item;
6006         struct btrfs_key key;
6007         int found_type;
6008         int i;
6009
6010         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6011                 return 0;
6012
6013         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6014                 btrfs_item_key_to_cpu(eb, &key, i);
6015                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6016                         continue;
6017                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6018                 found_type = btrfs_file_extent_type(eb, item);
6019                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6020                         continue;
6021                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6022                         continue;
6023                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6024                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6025                 __exclude_logged_extent(log, key.objectid, key.offset);
6026         }
6027
6028         return 0;
6029 }
6030
6031 /**
6032  * btrfs_update_reserved_bytes - update the block_group and space info counters
6033  * @cache:      The cache we are manipulating
6034  * @num_bytes:  The number of bytes in question
6035  * @reserve:    One of the reservation enums
6036  * @delalloc:   The blocks are allocated for the delalloc write
6037  *
6038  * This is called by the allocator when it reserves space, or by somebody who is
6039  * freeing space that was never actually used on disk.  For example if you
6040  * reserve some space for a new leaf in transaction A and before transaction A
6041  * commits you free that leaf, you call this with reserve set to 0 in order to
6042  * clear the reservation.
6043  *
6044  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
6045  * ENOSPC accounting.  For data we handle the reservation through clearing the
6046  * delalloc bits in the io_tree.  We have to do this since we could end up
6047  * allocating less disk space for the amount of data we have reserved in the
6048  * case of compression.
6049  *
6050  * If this is a reservation and the block group has become read only we cannot
6051  * make the reservation and return -EAGAIN, otherwise this function always
6052  * succeeds.
6053  */
6054 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6055                                        u64 num_bytes, int reserve, int delalloc)
6056 {
6057         struct btrfs_space_info *space_info = cache->space_info;
6058         int ret = 0;
6059
6060         spin_lock(&space_info->lock);
6061         spin_lock(&cache->lock);
6062         if (reserve != RESERVE_FREE) {
6063                 if (cache->ro) {
6064                         ret = -EAGAIN;
6065                 } else {
6066                         cache->reserved += num_bytes;
6067                         space_info->bytes_reserved += num_bytes;
6068                         if (reserve == RESERVE_ALLOC) {
6069                                 trace_btrfs_space_reservation(cache->fs_info,
6070                                                 "space_info", space_info->flags,
6071                                                 num_bytes, 0);
6072                                 space_info->bytes_may_use -= num_bytes;
6073                         }
6074
6075                         if (delalloc)
6076                                 cache->delalloc_bytes += num_bytes;
6077                 }
6078         } else {
6079                 if (cache->ro)
6080                         space_info->bytes_readonly += num_bytes;
6081                 cache->reserved -= num_bytes;
6082                 space_info->bytes_reserved -= num_bytes;
6083
6084                 if (delalloc)
6085                         cache->delalloc_bytes -= num_bytes;
6086         }
6087         spin_unlock(&cache->lock);
6088         spin_unlock(&space_info->lock);
6089         return ret;
6090 }
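
/*
 * A sketch of the reserve-then-clear pattern described above: take a
 * metadata reservation with ENOSPC accounting, then drop it again because
 * the space was never used on disk.  The example_* name is hypothetical.
 */
static inline int example_reserved_bytes_sketch(struct btrfs_block_group_cache *cache,
                                                u64 num_bytes)
{
        int ret;

        ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, 0);
        if (ret)
                return ret;     /* -EAGAIN: the block group went read only */

        /* ... we end up never writing anything at this location ... */

        btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, 0);
        return 0;
}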
6091
6092 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6093                                 struct btrfs_root *root)
6094 {
6095         struct btrfs_fs_info *fs_info = root->fs_info;
6096         struct btrfs_caching_control *next;
6097         struct btrfs_caching_control *caching_ctl;
6098         struct btrfs_block_group_cache *cache;
6099
6100         down_write(&fs_info->commit_root_sem);
6101
6102         list_for_each_entry_safe(caching_ctl, next,
6103                                  &fs_info->caching_block_groups, list) {
6104                 cache = caching_ctl->block_group;
6105                 if (block_group_cache_done(cache)) {
6106                         cache->last_byte_to_unpin = (u64)-1;
6107                         list_del_init(&caching_ctl->list);
6108                         put_caching_control(caching_ctl);
6109                 } else {
6110                         cache->last_byte_to_unpin = caching_ctl->progress;
6111                 }
6112         }
6113
6114         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6115                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6116         else
6117                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6118
6119         up_write(&fs_info->commit_root_sem);
6120
6121         update_global_block_rsv(fs_info);
6122 }
6123
6124 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6125                               const bool return_free_space)
6126 {
6127         struct btrfs_fs_info *fs_info = root->fs_info;
6128         struct btrfs_block_group_cache *cache = NULL;
6129         struct btrfs_space_info *space_info;
6130         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6131         u64 len;
6132         bool readonly;
6133
6134         while (start <= end) {
6135                 readonly = false;
6136                 if (!cache ||
6137                     start >= cache->key.objectid + cache->key.offset) {
6138                         if (cache)
6139                                 btrfs_put_block_group(cache);
6140                         cache = btrfs_lookup_block_group(fs_info, start);
6141                         BUG_ON(!cache); /* Logic error */
6142                 }
6143
6144                 len = cache->key.objectid + cache->key.offset - start;
6145                 len = min(len, end + 1 - start);
6146
6147                 if (start < cache->last_byte_to_unpin) {
6148                         len = min(len, cache->last_byte_to_unpin - start);
6149                         if (return_free_space)
6150                                 btrfs_add_free_space(cache, start, len);
6151                 }
6152
6153                 start += len;
6154                 space_info = cache->space_info;
6155
6156                 spin_lock(&space_info->lock);
6157                 spin_lock(&cache->lock);
6158                 cache->pinned -= len;
6159                 space_info->bytes_pinned -= len;
6160                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6161                 if (cache->ro) {
6162                         space_info->bytes_readonly += len;
6163                         readonly = true;
6164                 }
6165                 spin_unlock(&cache->lock);
6166                 if (!readonly && global_rsv->space_info == space_info) {
6167                         spin_lock(&global_rsv->lock);
6168                         if (!global_rsv->full) {
6169                                 len = min(len, global_rsv->size -
6170                                           global_rsv->reserved);
6171                                 global_rsv->reserved += len;
6172                                 space_info->bytes_may_use += len;
6173                                 if (global_rsv->reserved >= global_rsv->size)
6174                                         global_rsv->full = 1;
6175                         }
6176                         spin_unlock(&global_rsv->lock);
6177                 }
6178                 spin_unlock(&space_info->lock);
6179         }
6180
6181         if (cache)
6182                 btrfs_put_block_group(cache);
6183         return 0;
6184 }
6185
6186 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6187                                struct btrfs_root *root)
6188 {
6189         struct btrfs_fs_info *fs_info = root->fs_info;
6190         struct btrfs_block_group_cache *block_group, *tmp;
6191         struct list_head *deleted_bgs;
6192         struct extent_io_tree *unpin;
6193         u64 start;
6194         u64 end;
6195         int ret;
6196
6197         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6198                 unpin = &fs_info->freed_extents[1];
6199         else
6200                 unpin = &fs_info->freed_extents[0];
6201
6202         while (!trans->aborted) {
6203                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6204                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6205                                             EXTENT_DIRTY, NULL);
6206                 if (ret) {
6207                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6208                         break;
6209                 }
6210
6211                 if (btrfs_test_opt(root, DISCARD))
6212                         ret = btrfs_discard_extent(root, start,
6213                                                    end + 1 - start, NULL);
6214
6215                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6216                 unpin_extent_range(root, start, end, true);
6217                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6218                 cond_resched();
6219         }
6220
6221         /*
6222          * Transaction is finished.  We don't need the lock anymore.  We
6223          * do need to clean up the block groups in case of a transaction
6224          * abort.
6225          */
6226         deleted_bgs = &trans->transaction->deleted_bgs;
6227         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6228                 u64 trimmed = 0;
6229
6230                 ret = -EROFS;
6231                 if (!trans->aborted)
6232                         ret = btrfs_discard_extent(root,
6233                                                    block_group->key.objectid,
6234                                                    block_group->key.offset,
6235                                                    &trimmed);
6236
6237                 list_del_init(&block_group->bg_list);
6238                 btrfs_put_block_group_trimming(block_group);
6239                 btrfs_put_block_group(block_group);
6240
6241                 if (ret) {
6242                         const char *errstr = btrfs_decode_error(ret);
6243                         btrfs_warn(fs_info,
6244                                    "Discard failed while removing block group: errno=%d %s",
6245                                    ret, errstr);
6246                 }
6247         }
6248
6249         return 0;
6250 }
6251
6252 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6253                              u64 owner, u64 root_objectid)
6254 {
6255         struct btrfs_space_info *space_info;
6256         u64 flags;
6257
6258         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6259                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6260                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6261                 else
6262                         flags = BTRFS_BLOCK_GROUP_METADATA;
6263         } else {
6264                 flags = BTRFS_BLOCK_GROUP_DATA;
6265         }
6266
6267         space_info = __find_space_info(fs_info, flags);
6268         BUG_ON(!space_info); /* Logic bug */
6269         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6270 }
6271
6272
6273 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6274                                 struct btrfs_root *root,
6275                                 struct btrfs_delayed_ref_node *node, u64 parent,
6276                                 u64 root_objectid, u64 owner_objectid,
6277                                 u64 owner_offset, int refs_to_drop,
6278                                 struct btrfs_delayed_extent_op *extent_op)
6279 {
6280         struct btrfs_key key;
6281         struct btrfs_path *path;
6282         struct btrfs_fs_info *info = root->fs_info;
6283         struct btrfs_root *extent_root = info->extent_root;
6284         struct extent_buffer *leaf;
6285         struct btrfs_extent_item *ei;
6286         struct btrfs_extent_inline_ref *iref;
6287         int ret;
6288         int is_data;
6289         int extent_slot = 0;
6290         int found_extent = 0;
6291         int num_to_del = 1;
6292         int no_quota = node->no_quota;
6293         u32 item_size;
6294         u64 refs;
6295         u64 bytenr = node->bytenr;
6296         u64 num_bytes = node->num_bytes;
6297         int last_ref = 0;
6298         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6299                                                  SKINNY_METADATA);
6300
6301         if (!info->quota_enabled || !is_fstree(root_objectid))
6302                 no_quota = 1;
6303
6304         path = btrfs_alloc_path();
6305         if (!path)
6306                 return -ENOMEM;
6307
6308         path->reada = 1;
6309         path->leave_spinning = 1;
6310
6311         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6312         BUG_ON(!is_data && refs_to_drop != 1);
6313
6314         if (is_data)
6315                 skinny_metadata = 0;
6316
6317         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6318                                     bytenr, num_bytes, parent,
6319                                     root_objectid, owner_objectid,
6320                                     owner_offset);
6321         if (ret == 0) {
6322                 extent_slot = path->slots[0];
6323                 while (extent_slot >= 0) {
6324                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6325                                               extent_slot);
6326                         if (key.objectid != bytenr)
6327                                 break;
6328                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6329                             key.offset == num_bytes) {
6330                                 found_extent = 1;
6331                                 break;
6332                         }
6333                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6334                             key.offset == owner_objectid) {
6335                                 found_extent = 1;
6336                                 break;
6337                         }
6338                         if (path->slots[0] - extent_slot > 5)
6339                                 break;
6340                         extent_slot--;
6341                 }
6342 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6343                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6344                 if (found_extent && item_size < sizeof(*ei))
6345                         found_extent = 0;
6346 #endif
6347                 if (!found_extent) {
6348                         BUG_ON(iref);
6349                         ret = remove_extent_backref(trans, extent_root, path,
6350                                                     NULL, refs_to_drop,
6351                                                     is_data, &last_ref);
6352                         if (ret) {
6353                                 btrfs_abort_transaction(trans, extent_root, ret);
6354                                 goto out;
6355                         }
6356                         btrfs_release_path(path);
6357                         path->leave_spinning = 1;
6358
6359                         key.objectid = bytenr;
6360                         key.type = BTRFS_EXTENT_ITEM_KEY;
6361                         key.offset = num_bytes;
6362
6363                         if (!is_data && skinny_metadata) {
6364                                 key.type = BTRFS_METADATA_ITEM_KEY;
6365                                 key.offset = owner_objectid;
6366                         }
6367
6368                         ret = btrfs_search_slot(trans, extent_root,
6369                                                 &key, path, -1, 1);
6370                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6371                                 /*
6372                                  * Couldn't find our skinny metadata item,
6373                                  * see if we have ye olde extent item.
6374                                  */
6375                                 path->slots[0]--;
6376                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6377                                                       path->slots[0]);
6378                                 if (key.objectid == bytenr &&
6379                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6380                                     key.offset == num_bytes)
6381                                         ret = 0;
6382                         }
6383
6384                         if (ret > 0 && skinny_metadata) {
6385                                 skinny_metadata = false;
6386                                 key.objectid = bytenr;
6387                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6388                                 key.offset = num_bytes;
6389                                 btrfs_release_path(path);
6390                                 ret = btrfs_search_slot(trans, extent_root,
6391                                                         &key, path, -1, 1);
6392                         }
6393
6394                         if (ret) {
6395                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6396                                         ret, bytenr);
6397                                 if (ret > 0)
6398                                         btrfs_print_leaf(extent_root,
6399                                                          path->nodes[0]);
6400                         }
6401                         if (ret < 0) {
6402                                 btrfs_abort_transaction(trans, extent_root, ret);
6403                                 goto out;
6404                         }
6405                         extent_slot = path->slots[0];
6406                 }
6407         } else if (WARN_ON(ret == -ENOENT)) {
6408                 btrfs_print_leaf(extent_root, path->nodes[0]);
6409                 btrfs_err(info,
6410                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6411                         bytenr, parent, root_objectid, owner_objectid,
6412                         owner_offset);
6413                 btrfs_abort_transaction(trans, extent_root, ret);
6414                 goto out;
6415         } else {
6416                 btrfs_abort_transaction(trans, extent_root, ret);
6417                 goto out;
6418         }
6419
6420         leaf = path->nodes[0];
6421         item_size = btrfs_item_size_nr(leaf, extent_slot);
6422 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6423         if (item_size < sizeof(*ei)) {
6424                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6425                 ret = convert_extent_item_v0(trans, extent_root, path,
6426                                              owner_objectid, 0);
6427                 if (ret < 0) {
6428                         btrfs_abort_transaction(trans, extent_root, ret);
6429                         goto out;
6430                 }
6431
6432                 btrfs_release_path(path);
6433                 path->leave_spinning = 1;
6434
6435                 key.objectid = bytenr;
6436                 key.type = BTRFS_EXTENT_ITEM_KEY;
6437                 key.offset = num_bytes;
6438
6439                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6440                                         -1, 1);
6441                 if (ret) {
6442                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6443                                 ret, bytenr);
6444                         btrfs_print_leaf(extent_root, path->nodes[0]);
6445                 }
6446                 if (ret < 0) {
6447                         btrfs_abort_transaction(trans, extent_root, ret);
6448                         goto out;
6449                 }
6450
6451                 extent_slot = path->slots[0];
6452                 leaf = path->nodes[0];
6453                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6454         }
6455 #endif
6456         BUG_ON(item_size < sizeof(*ei));
6457         ei = btrfs_item_ptr(leaf, extent_slot,
6458                             struct btrfs_extent_item);
6459         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6460             key.type == BTRFS_EXTENT_ITEM_KEY) {
6461                 struct btrfs_tree_block_info *bi;
6462                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6463                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6464                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6465         }
6466
6467         refs = btrfs_extent_refs(leaf, ei);
6468         if (refs < refs_to_drop) {
6469                 btrfs_err(info, "trying to drop %d refs but we only have %llu "
6470                           "for bytenr %llu", refs_to_drop, refs, bytenr);
6471                 ret = -EINVAL;
6472                 btrfs_abort_transaction(trans, extent_root, ret);
6473                 goto out;
6474         }
6475         refs -= refs_to_drop;
6476
6477         if (refs > 0) {
6478                 if (extent_op)
6479                         __run_delayed_extent_op(extent_op, leaf, ei);
6480                 /*
6481                  * In the case of inline back ref, reference count will
6482                  * be updated by remove_extent_backref
6483                  */
6484                 if (iref) {
6485                         BUG_ON(!found_extent);
6486                 } else {
6487                         btrfs_set_extent_refs(leaf, ei, refs);
6488                         btrfs_mark_buffer_dirty(leaf);
6489                 }
6490                 if (found_extent) {
6491                         ret = remove_extent_backref(trans, extent_root, path,
6492                                                     iref, refs_to_drop,
6493                                                     is_data, &last_ref);
6494                         if (ret) {
6495                                 btrfs_abort_transaction(trans, extent_root, ret);
6496                                 goto out;
6497                         }
6498                 }
6499                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6500                                  root_objectid);
6501         } else {
6502                 if (found_extent) {
6503                         BUG_ON(is_data && refs_to_drop !=
6504                                extent_data_ref_count(path, iref));
6505                         if (iref) {
6506                                 BUG_ON(path->slots[0] != extent_slot);
6507                         } else {
6508                                 BUG_ON(path->slots[0] != extent_slot + 1);
6509                                 path->slots[0] = extent_slot;
6510                                 num_to_del = 2;
6511                         }
6512                 }
6513
6514                 last_ref = 1;
6515                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6516                                       num_to_del);
6517                 if (ret) {
6518                         btrfs_abort_transaction(trans, extent_root, ret);
6519                         goto out;
6520                 }
6521                 btrfs_release_path(path);
6522
6523                 if (is_data) {
6524                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6525                         if (ret) {
6526                                 btrfs_abort_transaction(trans, extent_root, ret);
6527                                 goto out;
6528                         }
6529                 }
6530
6531                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6532                 if (ret) {
6533                         btrfs_abort_transaction(trans, extent_root, ret);
6534                         goto out;
6535                 }
6536         }
6537         btrfs_release_path(path);
6538
6539 out:
6540         btrfs_free_path(path);
6541         return ret;
6542 }
6543
6544 /*
6545  * when we free a block, it is possible (and likely) that we free the last
6546  * delayed ref for that extent as well.  This searches the delayed ref tree for
6547  * a given extent, and if there are no other delayed refs to be processed, it
6548  * removes it from the tree.
6549  */
6550 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6551                                       struct btrfs_root *root, u64 bytenr)
6552 {
6553         struct btrfs_delayed_ref_head *head;
6554         struct btrfs_delayed_ref_root *delayed_refs;
6555         int ret = 0;
6556
6557         delayed_refs = &trans->transaction->delayed_refs;
6558         spin_lock(&delayed_refs->lock);
6559         head = btrfs_find_delayed_ref_head(trans, bytenr);
6560         if (!head)
6561                 goto out_delayed_unlock;
6562
6563         spin_lock(&head->lock);
6564         if (!list_empty(&head->ref_list))
6565                 goto out;
6566
6567         if (head->extent_op) {
6568                 if (!head->must_insert_reserved)
6569                         goto out;
6570                 btrfs_free_delayed_extent_op(head->extent_op);
6571                 head->extent_op = NULL;
6572         }
6573
6574         /*
6575          * waiting for the lock here would deadlock.  If someone else has it
6576          * locked they are already in the process of dropping it anyway
6577          */
6578         if (!mutex_trylock(&head->mutex))
6579                 goto out;
6580
6581         /*
6582          * at this point we have a head with no other entries.  Go
6583          * ahead and process it.
6584          */
6585         head->node.in_tree = 0;
6586         rb_erase(&head->href_node, &delayed_refs->href_root);
6587
6588         atomic_dec(&delayed_refs->num_entries);
6589
6590         /*
6591          * we don't take a ref on the node because we're removing it from the
6592          * tree, so we just steal the ref the tree was holding.
6593          */
6594         delayed_refs->num_heads--;
6595         if (head->processing == 0)
6596                 delayed_refs->num_heads_ready--;
6597         head->processing = 0;
6598         spin_unlock(&head->lock);
6599         spin_unlock(&delayed_refs->lock);
6600
6601         BUG_ON(head->extent_op);
6602         if (head->must_insert_reserved)
6603                 ret = 1;
6604
6605         mutex_unlock(&head->mutex);
6606         btrfs_put_delayed_ref(&head->node);
6607         return ret;
6608 out:
6609         spin_unlock(&head->lock);
6610
6611 out_delayed_unlock:
6612         spin_unlock(&delayed_refs->lock);
6613         return 0;
6614 }
6615
6616 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6617                            struct btrfs_root *root,
6618                            struct extent_buffer *buf,
6619                            u64 parent, int last_ref)
6620 {
6621         int pin = 1;
6622         int ret;
6623
6624         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6625                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6626                                         buf->start, buf->len,
6627                                         parent, root->root_key.objectid,
6628                                         btrfs_header_level(buf),
6629                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6630                 BUG_ON(ret); /* -ENOMEM */
6631         }
6632
6633         if (!last_ref)
6634                 return;
6635
6636         if (btrfs_header_generation(buf) == trans->transid) {
6637                 struct btrfs_block_group_cache *cache;
6638
6639                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6640                         ret = check_ref_cleanup(trans, root, buf->start);
6641                         if (!ret)
6642                                 goto out;
6643                 }
6644
6645                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6646
6647                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6648                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6649                         btrfs_put_block_group(cache);
6650                         goto out;
6651                 }
6652
6653                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6654
6655                 btrfs_add_free_space(cache, buf->start, buf->len);
6656                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6657                 btrfs_put_block_group(cache);
6658                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6659                 pin = 0;
6660         }
6661 out:
6662         if (pin)
6663                 add_pinned_bytes(root->fs_info, buf->len,
6664                                  btrfs_header_level(buf),
6665                                  root->root_key.objectid);
6666
6667         /*
6668          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6669          * anymore.
6670          */
6671         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6672 }
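/*
 * Added summary (descriptive note, not part of the original source): a block
 * whose header generation matches the running transaction can sometimes skip
 * the delayed ref machinery entirely.  If check_ref_cleanup() managed to drop
 * the pending ref head and the buffer was never written out, the space goes
 * straight back to the free space cache and the reservation is released; if
 * the buffer was already written (BTRFS_HEADER_FLAG_WRITTEN) it is pinned
 * until the transaction commits instead.  Every other case falls through to
 * "out" and accounts the bytes as pinned via add_pinned_bytes().
 */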
6673
6674 /* Can return -ENOMEM */
6675 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6676                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6677                       u64 owner, u64 offset, int no_quota)
6678 {
6679         int ret;
6680         struct btrfs_fs_info *fs_info = root->fs_info;
6681
6682         if (btrfs_test_is_dummy_root(root))
6683                 return 0;
6684
6685         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6686
6687         /*
6688          * tree log blocks never actually go into the extent allocation
6689          * tree, just update pinning info and exit early.
6690          */
6691         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6692                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6693                 /* unlocks the pinned mutex */
6694                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6695                 ret = 0;
6696         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6697                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6698                                         num_bytes,
6699                                         parent, root_objectid, (int)owner,
6700                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6701         } else {
6702                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6703                                                 num_bytes,
6704                                                 parent, root_objectid, owner,
6705                                                 offset, BTRFS_DROP_DELAYED_REF,
6706                                                 NULL, no_quota);
6707         }
6708         return ret;
6709 }
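/*
 * Dispatch summary plus a hypothetical call (added, illustrative only): tree
 * log blocks are pinned directly and never get extent tree items, metadata
 * blocks (owner < BTRFS_FIRST_FREE_OBJECTID) queue a delayed tree ref drop,
 * and everything else queues a delayed data ref drop.  Dropping one file
 * extent might look roughly like:
 *
 *	ret = btrfs_free_extent(trans, root, disk_bytenr, disk_num_bytes,
 *				0, root->root_key.objectid,
 *				btrfs_ino(inode), extent_offset, 0);
 *
 * where disk_bytenr, disk_num_bytes, inode and extent_offset stand in for
 * values taken from the file extent item being removed.
 */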
6710
6711 /*
6712  * when we wait for progress in the block group caching, it's because
6713  * our allocation attempt failed at least once.  So, we must sleep
6714  * and let some progress happen before we try again.
6715  *
6716  * This function will sleep at least once waiting for new free space to
6717  * show up, and then it will check the block group free space numbers
6718  * for our min num_bytes.  Another option is to have it go ahead
6719  * and look in the rbtree for a free extent of a given size, but this
6720  * is a good start.
6721  *
6722  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6723  * any of the information in this block group.
6724  */
6725 static noinline void
6726 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6727                                 u64 num_bytes)
6728 {
6729         struct btrfs_caching_control *caching_ctl;
6730
6731         caching_ctl = get_caching_control(cache);
6732         if (!caching_ctl)
6733                 return;
6734
6735         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6736                    (cache->free_space_ctl->free_space >= num_bytes));
6737
6738         put_caching_control(caching_ctl);
6739 }
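/*
 * Illustrative use (added, mirroring the caller further down in
 * find_free_extent()): after a failed attempt against a block group that is
 * still caching, the allocator waits once and retries:
 *
 *	if (!offset && !failed_alloc && !cached &&
 *	    loop > LOOP_CACHING_NOWAIT) {
 *		wait_block_group_cache_progress(block_group,
 *						num_bytes + empty_size);
 *		failed_alloc = true;
 *		goto have_block_group;
 *	}
 *
 * so each block group is only waited on once per pass.
 */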
6740
6741 static noinline int
6742 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6743 {
6744         struct btrfs_caching_control *caching_ctl;
6745         int ret = 0;
6746
6747         caching_ctl = get_caching_control(cache);
6748         if (!caching_ctl)
6749                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6750
6751         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6752         if (cache->cached == BTRFS_CACHE_ERROR)
6753                 ret = -EIO;
6754         put_caching_control(caching_ctl);
6755         return ret;
6756 }
6757
6758 int __get_raid_index(u64 flags)
6759 {
6760         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6761                 return BTRFS_RAID_RAID10;
6762         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6763                 return BTRFS_RAID_RAID1;
6764         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6765                 return BTRFS_RAID_DUP;
6766         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6767                 return BTRFS_RAID_RAID0;
6768         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6769                 return BTRFS_RAID_RAID5;
6770         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6771                 return BTRFS_RAID_RAID6;
6772
6773         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6774 }
6775
6776 int get_block_group_index(struct btrfs_block_group_cache *cache)
6777 {
6778         return __get_raid_index(cache->flags);
6779 }
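/*
 * Example (added, illustrative): a block group created with
 * BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1 maps to
 * BTRFS_RAID_RAID1, so get_raid_name(get_block_group_index(cache)) below
 * would return "raid1"; a group with no redundancy or striping bits set
 * falls through to BTRFS_RAID_SINGLE and "single".
 */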
6780
6781 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6782         [BTRFS_RAID_RAID10]     = "raid10",
6783         [BTRFS_RAID_RAID1]      = "raid1",
6784         [BTRFS_RAID_DUP]        = "dup",
6785         [BTRFS_RAID_RAID0]      = "raid0",
6786         [BTRFS_RAID_SINGLE]     = "single",
6787         [BTRFS_RAID_RAID5]      = "raid5",
6788         [BTRFS_RAID_RAID6]      = "raid6",
6789 };
6790
6791 static const char *get_raid_name(enum btrfs_raid_types type)
6792 {
6793         if (type >= BTRFS_NR_RAID_TYPES)
6794                 return NULL;
6795
6796         return btrfs_raid_type_names[type];
6797 }
6798
6799 enum btrfs_loop_type {
6800         LOOP_CACHING_NOWAIT = 0,
6801         LOOP_CACHING_WAIT = 1,
6802         LOOP_ALLOC_CHUNK = 2,
6803         LOOP_NO_EMPTY_SIZE = 3,
6804 };
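/*
 * Added note: find_free_extent() below walks these stages in order whenever
 * a full pass over the block groups comes up empty, roughly:
 *
 *	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 *		index = 0;
 *		loop++;
 *		if (loop == LOOP_ALLOC_CHUNK)
 *			do_chunk_alloc(trans, root, flags, CHUNK_ALLOC_FORCE);
 *		if (loop == LOOP_NO_EMPTY_SIZE)
 *			empty_size = empty_cluster = 0;
 *		goto search;
 *	}
 *
 * i.e. first search without waiting on caching, then wait for caching, then
 * force a chunk allocation, and finally retry without any empty_size or
 * empty_cluster padding.
 */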
6805
6806 static inline void
6807 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6808                        int delalloc)
6809 {
6810         if (delalloc)
6811                 down_read(&cache->data_rwsem);
6812 }
6813
6814 static inline void
6815 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6816                        int delalloc)
6817 {
6818         btrfs_get_block_group(cache);
6819         if (delalloc)
6820                 down_read(&cache->data_rwsem);
6821 }
6822
6823 static struct btrfs_block_group_cache *
6824 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6825                    struct btrfs_free_cluster *cluster,
6826                    int delalloc)
6827 {
6828         struct btrfs_block_group_cache *used_bg;
6829         bool locked = false;
6830 again:
6831         spin_lock(&cluster->refill_lock);
6832         if (locked) {
6833                 if (used_bg == cluster->block_group)
6834                         return used_bg;
6835
6836                 up_read(&used_bg->data_rwsem);
6837                 btrfs_put_block_group(used_bg);
6838         }
6839
6840         used_bg = cluster->block_group;
6841         if (!used_bg)
6842                 return NULL;
6843
6844         if (used_bg == block_group)
6845                 return used_bg;
6846
6847         btrfs_get_block_group(used_bg);
6848
6849         if (!delalloc)
6850                 return used_bg;
6851
6852         if (down_read_trylock(&used_bg->data_rwsem))
6853                 return used_bg;
6854
6855         spin_unlock(&cluster->refill_lock);
6856         down_read(&used_bg->data_rwsem);
6857         locked = true;
6858         goto again;
6859 }
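/*
 * Locking note for btrfs_lock_cluster() above (added): cluster->refill_lock
 * is a spinlock, so we must not sleep on data_rwsem while holding it.  The
 * slow path therefore drops the spinlock, takes the rwsem (which may sleep),
 * and loops back to "again:" to revalidate under the spinlock that the
 * cluster still points at the same block group; if it moved in the meantime,
 * the rwsem and the extra block group reference are dropped and the check is
 * repeated.
 */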
6860
6861 static inline void
6862 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6863                          int delalloc)
6864 {
6865         if (delalloc)
6866                 up_read(&cache->data_rwsem);
6867         btrfs_put_block_group(cache);
6868 }
6869
6870 /*
6871  * walks the btree of allocated extents and finds a hole of a given size.
6872  * The key ins is changed to record the hole:
6873  * ins->objectid == start position
6874  * ins->type == BTRFS_EXTENT_ITEM_KEY
6875  * ins->offset == the size of the hole.
6876  * Any available blocks before search_start are skipped.
6877  *
6878  * If there is no suitable free space, we will record the max size of
6879  * the free space extent currently available.
6880  */
6881 static noinline int find_free_extent(struct btrfs_root *orig_root,
6882                                      u64 num_bytes, u64 empty_size,
6883                                      u64 hint_byte, struct btrfs_key *ins,
6884                                      u64 flags, int delalloc)
6885 {
6886         int ret = 0;
6887         struct btrfs_root *root = orig_root->fs_info->extent_root;
6888         struct btrfs_free_cluster *last_ptr = NULL;
6889         struct btrfs_block_group_cache *block_group = NULL;
6890         u64 search_start = 0;
6891         u64 max_extent_size = 0;
6892         int empty_cluster = 2 * 1024 * 1024;
6893         struct btrfs_space_info *space_info;
6894         int loop = 0;
6895         int index = __get_raid_index(flags);
6896         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6897                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6898         bool failed_cluster_refill = false;
6899         bool failed_alloc = false;
6900         bool use_cluster = true;
6901         bool have_caching_bg = false;
6902
6903         WARN_ON(num_bytes < root->sectorsize);
6904         ins->type = BTRFS_EXTENT_ITEM_KEY;
6905         ins->objectid = 0;
6906         ins->offset = 0;
6907
6908         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6909
6910         space_info = __find_space_info(root->fs_info, flags);
6911         if (!space_info) {
6912                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6913                 return -ENOSPC;
6914         }
6915
6916         /*
6917          * If the space info is for both data and metadata it means we have a
6918          * small filesystem and we can't use the clustering stuff.
6919          */
6920         if (btrfs_mixed_space_info(space_info))
6921                 use_cluster = false;
6922
6923         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6924                 last_ptr = &root->fs_info->meta_alloc_cluster;
6925                 if (!btrfs_test_opt(root, SSD))
6926                         empty_cluster = 64 * 1024;
6927         }
6928
6929         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6930             btrfs_test_opt(root, SSD)) {
6931                 last_ptr = &root->fs_info->data_alloc_cluster;
6932         }
6933
6934         if (last_ptr) {
6935                 spin_lock(&last_ptr->lock);
6936                 if (last_ptr->block_group)
6937                         hint_byte = last_ptr->window_start;
6938                 spin_unlock(&last_ptr->lock);
6939         }
6940
6941         search_start = max(search_start, first_logical_byte(root, 0));
6942         search_start = max(search_start, hint_byte);
6943
6944         if (!last_ptr)
6945                 empty_cluster = 0;
6946
6947         if (search_start == hint_byte) {
6948                 block_group = btrfs_lookup_block_group(root->fs_info,
6949                                                        search_start);
6950                 /*
6951                  * we don't want to use the block group if it doesn't match our
6952                  * allocation bits, or if its not cached.
6953                  * allocation bits, or if it's not cached.
6954                  * However if we are re-searching with an ideal block group
6955                  * picked out then we don't care that the block group is cached.
6956                  */
6957                 if (block_group && block_group_bits(block_group, flags) &&
6958                     block_group->cached != BTRFS_CACHE_NO) {
6959                         down_read(&space_info->groups_sem);
6960                         if (list_empty(&block_group->list) ||
6961                             block_group->ro) {
6962                                 /*
6963                                  * someone is removing this block group,
6964                                  * we can't jump into the have_block_group
6965                                  * target because our list pointers are not
6966                                  * valid
6967                                  */
6968                                 btrfs_put_block_group(block_group);
6969                                 up_read(&space_info->groups_sem);
6970                         } else {
6971                                 index = get_block_group_index(block_group);
6972                                 btrfs_lock_block_group(block_group, delalloc);
6973                                 goto have_block_group;
6974                         }
6975                 } else if (block_group) {
6976                         btrfs_put_block_group(block_group);
6977                 }
6978         }
6979 search:
6980         have_caching_bg = false;
6981         down_read(&space_info->groups_sem);
6982         list_for_each_entry(block_group, &space_info->block_groups[index],
6983                             list) {
6984                 u64 offset;
6985                 int cached;
6986
6987                 btrfs_grab_block_group(block_group, delalloc);
6988                 search_start = block_group->key.objectid;
6989
6990                 /*
6991                  * this can happen if we end up cycling through all the
6992                  * raid types, but we want to make sure we only allocate
6993                  * for the proper type.
6994                  */
6995                 if (!block_group_bits(block_group, flags)) {
6996                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6997                                     BTRFS_BLOCK_GROUP_RAID1 |
6998                                     BTRFS_BLOCK_GROUP_RAID5 |
6999                                     BTRFS_BLOCK_GROUP_RAID6 |
7000                                     BTRFS_BLOCK_GROUP_RAID10;
7001
7002                         /*
7003                          * if they asked for extra copies and this block group
7004                          * doesn't provide them, bail.  This does allow us to
7005                          * fill raid0 from raid1.
7006                          */
7007                         if ((flags & extra) && !(block_group->flags & extra))
7008                                 goto loop;
7009                 }
7010
7011 have_block_group:
7012                 cached = block_group_cache_done(block_group);
7013                 if (unlikely(!cached)) {
7014                         ret = cache_block_group(block_group, 0);
7015                         BUG_ON(ret < 0);
7016                         ret = 0;
7017                 }
7018
7019                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7020                         goto loop;
7021                 if (unlikely(block_group->ro))
7022                         goto loop;
7023
7024                 /*
7025                  * Ok, we want to try to use the cluster allocator, so
7026                  * let's look there
7027                  */
7028                 if (last_ptr) {
7029                         struct btrfs_block_group_cache *used_block_group;
7030                         unsigned long aligned_cluster;
7031                         /*
7032                          * the refill lock keeps out other
7033                          * people trying to start a new cluster
7034                          */
7035                         used_block_group = btrfs_lock_cluster(block_group,
7036                                                               last_ptr,
7037                                                               delalloc);
7038                         if (!used_block_group)
7039                                 goto refill_cluster;
7040
7041                         if (used_block_group != block_group &&
7042                             (used_block_group->ro ||
7043                              !block_group_bits(used_block_group, flags)))
7044                                 goto release_cluster;
7045
7046                         offset = btrfs_alloc_from_cluster(used_block_group,
7047                                                 last_ptr,
7048                                                 num_bytes,
7049                                                 used_block_group->key.objectid,
7050                                                 &max_extent_size);
7051                         if (offset) {
7052                                 /* we have a block, we're done */
7053                                 spin_unlock(&last_ptr->refill_lock);
7054                                 trace_btrfs_reserve_extent_cluster(root,
7055                                                 used_block_group,
7056                                                 search_start, num_bytes);
7057                                 if (used_block_group != block_group) {
7058                                         btrfs_release_block_group(block_group,
7059                                                                   delalloc);
7060                                         block_group = used_block_group;
7061                                 }
7062                                 goto checks;
7063                         }
7064
7065                         WARN_ON(last_ptr->block_group != used_block_group);
7066 release_cluster:
7067                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7068                          * set up a new cluster, so let's just skip it
7069                          * and let the allocator find whatever block
7070                          * it can find.  If we reach this point, we
7071                          * will have tried the cluster allocator
7072                          * plenty of times and not have found
7073                          * anything, so we are likely way too
7074                          * fragmented for the clustering stuff to find
7075                          * anything.
7076                          *
7077                          * However, if the cluster is taken from the
7078                          * current block group, release the cluster
7079                          * first, so that we stand a better chance of
7080                          * succeeding in the unclustered
7081                          * allocation.  */
7082                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7083                             used_block_group != block_group) {
7084                                 spin_unlock(&last_ptr->refill_lock);
7085                                 btrfs_release_block_group(used_block_group,
7086                                                           delalloc);
7087                                 goto unclustered_alloc;
7088                         }
7089
7090                         /*
7091                          * this cluster didn't work out, free it and
7092                          * start over
7093                          */
7094                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7095
7096                         if (used_block_group != block_group)
7097                                 btrfs_release_block_group(used_block_group,
7098                                                           delalloc);
7099 refill_cluster:
7100                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7101                                 spin_unlock(&last_ptr->refill_lock);
7102                                 goto unclustered_alloc;
7103                         }
7104
7105                         aligned_cluster = max_t(unsigned long,
7106                                                 empty_cluster + empty_size,
7107                                               block_group->full_stripe_len);
7108
7109                         /* allocate a cluster in this block group */
7110                         ret = btrfs_find_space_cluster(root, block_group,
7111                                                        last_ptr, search_start,
7112                                                        num_bytes,
7113                                                        aligned_cluster);
7114                         if (ret == 0) {
7115                                 /*
7116                                  * now pull our allocation out of this
7117                                  * cluster
7118                                  */
7119                                 offset = btrfs_alloc_from_cluster(block_group,
7120                                                         last_ptr,
7121                                                         num_bytes,
7122                                                         search_start,
7123                                                         &max_extent_size);
7124                                 if (offset) {
7125                                         /* we found one, proceed */
7126                                         spin_unlock(&last_ptr->refill_lock);
7127                                         trace_btrfs_reserve_extent_cluster(root,
7128                                                 block_group, search_start,
7129                                                 num_bytes);
7130                                         goto checks;
7131                                 }
7132                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7133                                    && !failed_cluster_refill) {
7134                                 spin_unlock(&last_ptr->refill_lock);
7135
7136                                 failed_cluster_refill = true;
7137                                 wait_block_group_cache_progress(block_group,
7138                                        num_bytes + empty_cluster + empty_size);
7139                                 goto have_block_group;
7140                         }
7141
7142                         /*
7143                          * at this point we either didn't find a cluster
7144                          * or we weren't able to allocate a block from our
7145                          * cluster.  Free the cluster we've been trying
7146                          * to use, and go to the next block group
7147                          */
7148                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7149                         spin_unlock(&last_ptr->refill_lock);
7150                         goto loop;
7151                 }
7152
7153 unclustered_alloc:
7154                 spin_lock(&block_group->free_space_ctl->tree_lock);
7155                 if (cached &&
7156                     block_group->free_space_ctl->free_space <
7157                     num_bytes + empty_cluster + empty_size) {
7158                         if (block_group->free_space_ctl->free_space >
7159                             max_extent_size)
7160                                 max_extent_size =
7161                                         block_group->free_space_ctl->free_space;
7162                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7163                         goto loop;
7164                 }
7165                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7166
7167                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7168                                                     num_bytes, empty_size,
7169                                                     &max_extent_size);
7170                 /*
7171                  * If we didn't find a chunk, and we haven't failed on this
7172                  * block group before, and this block group is in the middle of
7173                  * caching and we are ok with waiting, then go ahead and wait
7174                  * for progress to be made, and set failed_alloc to true.
7175                  *
7176                  * If failed_alloc is true then we've already waited on this
7177                  * block group once and should move on to the next block group.
7178                  */
7179                 if (!offset && !failed_alloc && !cached &&
7180                     loop > LOOP_CACHING_NOWAIT) {
7181                         wait_block_group_cache_progress(block_group,
7182                                                 num_bytes + empty_size);
7183                         failed_alloc = true;
7184                         goto have_block_group;
7185                 } else if (!offset) {
7186                         if (!cached)
7187                                 have_caching_bg = true;
7188                         goto loop;
7189                 }
7190 checks:
7191                 search_start = ALIGN(offset, root->stripesize);
7192
7193                 /* move on to the next group */
7194                 if (search_start + num_bytes >
7195                     block_group->key.objectid + block_group->key.offset) {
7196                         btrfs_add_free_space(block_group, offset, num_bytes);
7197                         goto loop;
7198                 }
7199
7200                 if (offset < search_start)
7201                         btrfs_add_free_space(block_group, offset,
7202                                              search_start - offset);
7203                 BUG_ON(offset > search_start);
7204
7205                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7206                                                   alloc_type, delalloc);
7207                 if (ret == -EAGAIN) {
7208                         btrfs_add_free_space(block_group, offset, num_bytes);
7209                         goto loop;
7210                 }
7211
7212                 /* we are all good, let's return */
7213                 ins->objectid = search_start;
7214                 ins->offset = num_bytes;
7215
7216                 trace_btrfs_reserve_extent(orig_root, block_group,
7217                                            search_start, num_bytes);
7218                 btrfs_release_block_group(block_group, delalloc);
7219                 break;
7220 loop:
7221                 failed_cluster_refill = false;
7222                 failed_alloc = false;
7223                 BUG_ON(index != get_block_group_index(block_group));
7224                 btrfs_release_block_group(block_group, delalloc);
7225         }
7226         up_read(&space_info->groups_sem);
7227
7228         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7229                 goto search;
7230
7231         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7232                 goto search;
7233
7234         /*
7235          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7236          *                      caching kthreads as we move along
7237          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7238          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7239          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7240          *                      again
7241          */
7242         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7243                 index = 0;
7244                 loop++;
7245                 if (loop == LOOP_ALLOC_CHUNK) {
7246                         struct btrfs_trans_handle *trans;
7247                         int exist = 0;
7248
7249                         trans = current->journal_info;
7250                         if (trans)
7251                                 exist = 1;
7252                         else
7253                                 trans = btrfs_join_transaction(root);
7254
7255                         if (IS_ERR(trans)) {
7256                                 ret = PTR_ERR(trans);
7257                                 goto out;
7258                         }
7259
7260                         ret = do_chunk_alloc(trans, root, flags,
7261                                              CHUNK_ALLOC_FORCE);
7262                         /*
7263                          * Do not bail out on ENOSPC since we
7264                          * can do more things.
7265                          */
7266                         if (ret < 0 && ret != -ENOSPC)
7267                                 btrfs_abort_transaction(trans,
7268                                                         root, ret);
7269                         else
7270                                 ret = 0;
7271                         if (!exist)
7272                                 btrfs_end_transaction(trans, root);
7273                         if (ret)
7274                                 goto out;
7275                 }
7276
7277                 if (loop == LOOP_NO_EMPTY_SIZE) {
7278                         empty_size = 0;
7279                         empty_cluster = 0;
7280                 }
7281
7282                 goto search;
7283         } else if (!ins->objectid) {
7284                 ret = -ENOSPC;
7285         } else if (ins->objectid) {
7286                 ret = 0;
7287         }
7288 out:
7289         if (ret == -ENOSPC)
7290                 ins->offset = max_extent_size;
7291         return ret;
7292 }
7293
7294 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7295                             int dump_block_groups)
7296 {
7297         struct btrfs_block_group_cache *cache;
7298         int index = 0;
7299
7300         spin_lock(&info->lock);
7301         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7302                info->flags,
7303                info->total_bytes - info->bytes_used - info->bytes_pinned -
7304                info->bytes_reserved - info->bytes_readonly,
7305                (info->full) ? "" : "not ");
7306         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7307                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7308                info->total_bytes, info->bytes_used, info->bytes_pinned,
7309                info->bytes_reserved, info->bytes_may_use,
7310                info->bytes_readonly);
7311         spin_unlock(&info->lock);
7312
7313         if (!dump_block_groups)
7314                 return;
7315
7316         down_read(&info->groups_sem);
7317 again:
7318         list_for_each_entry(cache, &info->block_groups[index], list) {
7319                 spin_lock(&cache->lock);
7320                 printk(KERN_INFO "BTRFS: "
7321                            "block group %llu has %llu bytes, "
7322                            "%llu used %llu pinned %llu reserved %s\n",
7323                        cache->key.objectid, cache->key.offset,
7324                        btrfs_block_group_used(&cache->item), cache->pinned,
7325                        cache->reserved, cache->ro ? "[readonly]" : "");
7326                 btrfs_dump_free_space(cache, bytes);
7327                 spin_unlock(&cache->lock);
7328         }
7329         if (++index < BTRFS_NR_RAID_TYPES)
7330                 goto again;
7331         up_read(&info->groups_sem);
7332 }
7333
7334 int btrfs_reserve_extent(struct btrfs_root *root,
7335                          u64 num_bytes, u64 min_alloc_size,
7336                          u64 empty_size, u64 hint_byte,
7337                          struct btrfs_key *ins, int is_data, int delalloc)
7338 {
7339         bool final_tried = false;
7340         u64 flags;
7341         int ret;
7342
7343         flags = btrfs_get_alloc_profile(root, is_data);
7344 again:
7345         WARN_ON(num_bytes < root->sectorsize);
7346         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7347                                flags, delalloc);
7348
7349         if (ret == -ENOSPC) {
7350                 if (!final_tried && ins->offset) {
7351                         num_bytes = min(num_bytes >> 1, ins->offset);
7352                         num_bytes = round_down(num_bytes, root->sectorsize);
7353                         num_bytes = max(num_bytes, min_alloc_size);
7354                         if (num_bytes == min_alloc_size)
7355                                 final_tried = true;
7356                         goto again;
7357                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7358                         struct btrfs_space_info *sinfo;
7359
7360                         sinfo = __find_space_info(root->fs_info, flags);
7361                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7362                                 flags, num_bytes);
7363                         if (sinfo)
7364                                 dump_space_info(sinfo, num_bytes, 1);
7365                 }
7366         }
7367
7368         return ret;
7369 }
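/*
 * Worked example (added, numbers hypothetical): with a 4KiB sectorsize,
 * num_bytes = 1MiB and min_alloc_size = 64KiB, repeated -ENOSPC results
 * whose ins->offset stays large shrink the request
 * 1MiB -> 512KiB -> 256KiB -> 128KiB -> 64KiB; once the request has been
 * clamped to min_alloc_size, final_tried is set and the next failure is
 * returned to the caller instead of being retried.
 */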
7370
7371 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7372                                         u64 start, u64 len,
7373                                         int pin, int delalloc)
7374 {
7375         struct btrfs_block_group_cache *cache;
7376         int ret = 0;
7377
7378         cache = btrfs_lookup_block_group(root->fs_info, start);
7379         if (!cache) {
7380                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7381                         start);
7382                 return -ENOSPC;
7383         }
7384
7385         if (pin)
7386                 pin_down_extent(root, cache, start, len, 1);
7387         else {
7388                 if (btrfs_test_opt(root, DISCARD))
7389                         ret = btrfs_discard_extent(root, start, len, NULL);
7390                 btrfs_add_free_space(cache, start, len);
7391                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7392         }
7393
7394         btrfs_put_block_group(cache);
7395
7396         trace_btrfs_reserved_extent_free(root, start, len);
7397
7398         return ret;
7399 }
7400
7401 int btrfs_free_reserved_extent(struct btrfs_root *root,
7402                                u64 start, u64 len, int delalloc)
7403 {
7404         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7405 }
7406
7407 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7408                                        u64 start, u64 len)
7409 {
7410         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7411 }
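/*
 * Usage note (added): the two wrappers above differ only in the 'pin'
 * argument.  btrfs_free_reserved_extent() returns the range to the free
 * space cache right away (optionally discarding it first), which is what
 * ordinary allocation error paths want; btrfs_free_and_pin_reserved_extent()
 * pins the range so it only becomes reusable after the running transaction
 * commits, as used by the error handling in alloc_reserved_tree_block()
 * below.
 */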
7412
7413 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7414                                       struct btrfs_root *root,
7415                                       u64 parent, u64 root_objectid,
7416                                       u64 flags, u64 owner, u64 offset,
7417                                       struct btrfs_key *ins, int ref_mod)
7418 {
7419         int ret;
7420         struct btrfs_fs_info *fs_info = root->fs_info;
7421         struct btrfs_extent_item *extent_item;
7422         struct btrfs_extent_inline_ref *iref;
7423         struct btrfs_path *path;
7424         struct extent_buffer *leaf;
7425         int type;
7426         u32 size;
7427
7428         if (parent > 0)
7429                 type = BTRFS_SHARED_DATA_REF_KEY;
7430         else
7431                 type = BTRFS_EXTENT_DATA_REF_KEY;
7432
7433         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7434
7435         path = btrfs_alloc_path();
7436         if (!path)
7437                 return -ENOMEM;
7438
7439         path->leave_spinning = 1;
7440         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7441                                       ins, size);
7442         if (ret) {
7443                 btrfs_free_path(path);
7444                 return ret;
7445         }
7446
7447         leaf = path->nodes[0];
7448         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7449                                      struct btrfs_extent_item);
7450         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7451         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7452         btrfs_set_extent_flags(leaf, extent_item,
7453                                flags | BTRFS_EXTENT_FLAG_DATA);
7454
7455         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7456         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7457         if (parent > 0) {
7458                 struct btrfs_shared_data_ref *ref;
7459                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7460                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7461                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7462         } else {
7463                 struct btrfs_extent_data_ref *ref;
7464                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7465                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7466                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7467                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7468                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7469         }
7470
7471         btrfs_mark_buffer_dirty(path->nodes[0]);
7472         btrfs_free_path(path);
7473
7474         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7475         if (ret) { /* -ENOENT, logic error */
7476                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7477                         ins->objectid, ins->offset);
7478                 BUG();
7479         }
7480         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7481         return ret;
7482 }
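/*
 * Resulting extent tree item layout (added sketch, derived from the code
 * above):
 *
 *	key (ins->objectid, BTRFS_EXTENT_ITEM_KEY, ins->offset)
 *	  struct btrfs_extent_item: refs = ref_mod, generation,
 *	                            flags | BTRFS_EXTENT_FLAG_DATA
 *	  struct btrfs_extent_inline_ref:
 *	    parent != 0: type = BTRFS_SHARED_DATA_REF_KEY, offset = parent,
 *	                 followed by a btrfs_shared_data_ref (count = ref_mod)
 *	    parent == 0: type = BTRFS_EXTENT_DATA_REF_KEY, with a
 *	                 btrfs_extent_data_ref (root, owner, offset,
 *	                 count = ref_mod) stored in place of the offset field
 */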
7483
7484 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7485                                      struct btrfs_root *root,
7486                                      u64 parent, u64 root_objectid,
7487                                      u64 flags, struct btrfs_disk_key *key,
7488                                      int level, struct btrfs_key *ins,
7489                                      int no_quota)
7490 {
7491         int ret;
7492         struct btrfs_fs_info *fs_info = root->fs_info;
7493         struct btrfs_extent_item *extent_item;
7494         struct btrfs_tree_block_info *block_info;
7495         struct btrfs_extent_inline_ref *iref;
7496         struct btrfs_path *path;
7497         struct extent_buffer *leaf;
7498         u32 size = sizeof(*extent_item) + sizeof(*iref);
7499         u64 num_bytes = ins->offset;
7500         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7501                                                  SKINNY_METADATA);
7502
7503         if (!skinny_metadata)
7504                 size += sizeof(*block_info);
7505
7506         path = btrfs_alloc_path();
7507         if (!path) {
7508                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7509                                                    root->nodesize);
7510                 return -ENOMEM;
7511         }
7512
7513         path->leave_spinning = 1;
7514         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7515                                       ins, size);
7516         if (ret) {
7517                 btrfs_free_path(path);
7518                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7519                                                    root->nodesize);
7520                 return ret;
7521         }
7522
7523         leaf = path->nodes[0];
7524         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7525                                      struct btrfs_extent_item);
7526         btrfs_set_extent_refs(leaf, extent_item, 1);
7527         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7528         btrfs_set_extent_flags(leaf, extent_item,
7529                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7530
7531         if (skinny_metadata) {
7532                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7533                 num_bytes = root->nodesize;
7534         } else {
7535                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7536                 btrfs_set_tree_block_key(leaf, block_info, key);
7537                 btrfs_set_tree_block_level(leaf, block_info, level);
7538                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7539         }
7540
7541         if (parent > 0) {
7542                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7543                 btrfs_set_extent_inline_ref_type(leaf, iref,
7544                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7545                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7546         } else {
7547                 btrfs_set_extent_inline_ref_type(leaf, iref,
7548                                                  BTRFS_TREE_BLOCK_REF_KEY);
7549                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7550         }
7551
7552         btrfs_mark_buffer_dirty(leaf);
7553         btrfs_free_path(path);
7554
7555         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7556                                  1);
7557         if (ret) { /* -ENOENT, logic error */
7558                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7559                         ins->objectid, ins->offset);
7560                 BUG();
7561         }
7562
7563         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7564         return ret;
7565 }
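/*
 * Added note on the two layouts written above: with the SKINNY_METADATA
 * incompat flag the caller passes ins as
 * (bytenr, BTRFS_METADATA_ITEM_KEY, level), only the extent item plus one
 * inline ref are stored, and the block size is taken from root->nodesize;
 * without it, ins is (bytenr, BTRFS_EXTENT_ITEM_KEY, nodesize) and a
 * btrfs_tree_block_info carrying the key and level sits between the extent
 * item and the inline ref.
 */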
7566
7567 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7568                                      struct btrfs_root *root,
7569                                      u64 root_objectid, u64 owner,
7570                                      u64 offset, struct btrfs_key *ins)
7571 {
7572         int ret;
7573
7574         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7575
7576         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7577                                          ins->offset, 0,
7578                                          root_objectid, owner, offset,
7579                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7580         return ret;
7581 }
7582
7583 /*
7584  * this is used by the tree logging recovery code.  It records that
7585  * an extent has been allocated and makes sure to clear the free
7586  * space cache bits as well
7587  */
7588 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7589                                    struct btrfs_root *root,
7590                                    u64 root_objectid, u64 owner, u64 offset,
7591                                    struct btrfs_key *ins)
7592 {
7593         int ret;
7594         struct btrfs_block_group_cache *block_group;
7595
7596         /*
7597          * Mixed block groups will exclude before processing the log so we only
7598          * need to do the exclude dance if this fs isn't mixed.
7599          */
7600         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7601                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7602                 if (ret)
7603                         return ret;
7604         }
7605
7606         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7607         if (!block_group)
7608                 return -EINVAL;
7609
7610         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7611                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7612         BUG_ON(ret); /* logic error */
7613         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7614                                          0, owner, offset, ins, 1);
7615         btrfs_put_block_group(block_group);
7616         return ret;
7617 }
7618
7619 static struct extent_buffer *
7620 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7621                       u64 bytenr, int level)
7622 {
7623         struct extent_buffer *buf;
7624
7625         buf = btrfs_find_create_tree_block(root, bytenr);
7626         if (!buf)
7627                 return ERR_PTR(-ENOMEM);
7628         btrfs_set_header_generation(buf, trans->transid);
7629         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7630         btrfs_tree_lock(buf);
7631         clean_tree_block(trans, root->fs_info, buf);
7632         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7633
7634         btrfs_set_lock_blocking(buf);
7635         btrfs_set_buffer_uptodate(buf);
7636
7637         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7638                 buf->log_index = root->log_transid % 2;
7639                 /*
7640                  * we allow two log transactions at a time, use different
7641                  * EXENT bit to differentiate dirty pages.
7642                  * EXTENT bits to differentiate dirty pages.
7643                 if (buf->log_index == 0)
7644                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7645                                         buf->start + buf->len - 1, GFP_NOFS);
7646                 else
7647                         set_extent_new(&root->dirty_log_pages, buf->start,
7648                                         buf->start + buf->len - 1, GFP_NOFS);
7649         } else {
7650                 buf->log_index = -1;
7651                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7652                          buf->start + buf->len - 1, GFP_NOFS);
7653         }
7654         trans->blocks_used++;
7655         /* this returns a buffer locked for blocking */
7656         return buf;
7657 }
7658
7659 static struct btrfs_block_rsv *
7660 use_block_rsv(struct btrfs_trans_handle *trans,
7661               struct btrfs_root *root, u32 blocksize)
7662 {
7663         struct btrfs_block_rsv *block_rsv;
7664         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7665         int ret;
7666         bool global_updated = false;
7667
7668         block_rsv = get_block_rsv(trans, root);
7669
7670         if (unlikely(block_rsv->size == 0))
7671                 goto try_reserve;
7672 again:
7673         ret = block_rsv_use_bytes(block_rsv, blocksize);
7674         if (!ret)
7675                 return block_rsv;
7676
7677         if (block_rsv->failfast)
7678                 return ERR_PTR(ret);
7679
7680         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7681                 global_updated = true;
7682                 update_global_block_rsv(root->fs_info);
7683                 goto again;
7684         }
7685
7686         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7687                 static DEFINE_RATELIMIT_STATE(_rs,
7688                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7689                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7690                 if (__ratelimit(&_rs))
7691                         WARN(1, KERN_DEBUG
7692                                 "BTRFS: block rsv returned %d\n", ret);
7693         }
7694 try_reserve:
7695         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7696                                      BTRFS_RESERVE_NO_FLUSH);
7697         if (!ret)
7698                 return block_rsv;
7699         /*
7700          * If we couldn't reserve metadata bytes, try to use some from
7701          * the global reserve if it shares the same space_info as this
7702          * reservation.
7703          */
7704         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7705             block_rsv->space_info == global_rsv->space_info) {
7706                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7707                 if (!ret)
7708                         return global_rsv;
7709         }
7710         return ERR_PTR(ret);
7711 }
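/*
 * Fallback order implemented above (added summary): the rsv attached to the
 * transaction first; if that is the global rsv, one retry after refreshing
 * it; then a fresh BTRFS_RESERVE_NO_FLUSH metadata reservation; and finally
 * bytes from the global reserve itself when it shares the same space_info.
 * A caller such as btrfs_alloc_tree_block() below just does:
 *
 *	block_rsv = use_block_rsv(trans, root, blocksize);
 *	if (IS_ERR(block_rsv))
 *		return ERR_CAST(block_rsv);
 */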
7712
7713 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7714                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7715 {
7716         block_rsv_add_bytes(block_rsv, blocksize, 0);
7717         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7718 }
7719
7720 /*
7721  * finds a free extent and does all the dirty work required for allocation.
7722  * Returns the tree buffer or an ERR_PTR on error.
7723  */
7724 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7725                                         struct btrfs_root *root,
7726                                         u64 parent, u64 root_objectid,
7727                                         struct btrfs_disk_key *key, int level,
7728                                         u64 hint, u64 empty_size)
7729 {
7730         struct btrfs_key ins;
7731         struct btrfs_block_rsv *block_rsv;
7732         struct extent_buffer *buf;
7733         struct btrfs_delayed_extent_op *extent_op;
7734         u64 flags = 0;
7735         int ret;
7736         u32 blocksize = root->nodesize;
7737         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7738                                                  SKINNY_METADATA);
7739
7740         if (btrfs_test_is_dummy_root(root)) {
7741                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7742                                             level);
7743                 if (!IS_ERR(buf))
7744                         root->alloc_bytenr += blocksize;
7745                 return buf;
7746         }
7747
7748         block_rsv = use_block_rsv(trans, root, blocksize);
7749         if (IS_ERR(block_rsv))
7750                 return ERR_CAST(block_rsv);
7751
7752         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7753                                    empty_size, hint, &ins, 0, 0);
7754         if (ret)
7755                 goto out_unuse;
7756
7757         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7758         if (IS_ERR(buf)) {
7759                 ret = PTR_ERR(buf);
7760                 goto out_free_reserved;
7761         }
7762
7763         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7764                 if (parent == 0)
7765                         parent = ins.objectid;
7766                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7767         } else
7768                 BUG_ON(parent > 0);
7769
7770         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7771                 extent_op = btrfs_alloc_delayed_extent_op();
7772                 if (!extent_op) {
7773                         ret = -ENOMEM;
7774                         goto out_free_buf;
7775                 }
7776                 if (key)
7777                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7778                 else
7779                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7780                 extent_op->flags_to_set = flags;
7781                 if (skinny_metadata)
7782                         extent_op->update_key = 0;
7783                 else
7784                         extent_op->update_key = 1;
7785                 extent_op->update_flags = 1;
7786                 extent_op->is_data = 0;
7787                 extent_op->level = level;
7788
7789                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7790                                                  ins.objectid, ins.offset,
7791                                                  parent, root_objectid, level,
7792                                                  BTRFS_ADD_DELAYED_EXTENT,
7793                                                  extent_op, 0);
7794                 if (ret)
7795                         goto out_free_delayed;
7796         }
7797         return buf;
7798
7799 out_free_delayed:
7800         btrfs_free_delayed_extent_op(extent_op);
7801 out_free_buf:
7802         free_extent_buffer(buf);
7803 out_free_reserved:
7804         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7805 out_unuse:
7806         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7807         return ERR_PTR(ret);
7808 }
7809
7810 struct walk_control {
7811         u64 refs[BTRFS_MAX_LEVEL];
7812         u64 flags[BTRFS_MAX_LEVEL];
7813         struct btrfs_key update_progress;
7814         int stage;
7815         int level;
7816         int shared_level;
7817         int update_ref;
7818         int keep_locks;
7819         int reada_slot;
7820         int reada_count;
7821         int for_reloc;
7822 };
7823
7824 #define DROP_REFERENCE  1
7825 #define UPDATE_BACKREF  2
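/*
 * Added note (rough summary): wc->stage selects which pass of the snapshot
 * drop walk we are in.  DROP_REFERENCE drops one reference per visited block
 * and only descends into subtrees it will actually free; when it hits a
 * shared block and wc->update_ref is set, the walk temporarily switches to
 * UPDATE_BACKREF to bring that subtree's back references up to date before
 * resuming the drop.  reada_walk_down() below uses the stage to decide which
 * child blocks are worth prefetching.
 */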
7826
7827 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7828                                      struct btrfs_root *root,
7829                                      struct walk_control *wc,
7830                                      struct btrfs_path *path)
7831 {
7832         u64 bytenr;
7833         u64 generation;
7834         u64 refs;
7835         u64 flags;
7836         u32 nritems;
7837         u32 blocksize;
7838         struct btrfs_key key;
7839         struct extent_buffer *eb;
7840         int ret;
7841         int slot;
7842         int nread = 0;
7843
7844         if (path->slots[wc->level] < wc->reada_slot) {
7845                 wc->reada_count = wc->reada_count * 2 / 3;
7846                 wc->reada_count = max(wc->reada_count, 2);
7847         } else {
7848                 wc->reada_count = wc->reada_count * 3 / 2;
7849                 wc->reada_count = min_t(int, wc->reada_count,
7850                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7851         }
7852
7853         eb = path->nodes[wc->level];
7854         nritems = btrfs_header_nritems(eb);
7855         blocksize = root->nodesize;
7856
7857         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7858                 if (nread >= wc->reada_count)
7859                         break;
7860
7861                 cond_resched();
7862                 bytenr = btrfs_node_blockptr(eb, slot);
7863                 generation = btrfs_node_ptr_generation(eb, slot);
7864
7865                 if (slot == path->slots[wc->level])
7866                         goto reada;
7867
7868                 if (wc->stage == UPDATE_BACKREF &&
7869                     generation <= root->root_key.offset)
7870                         continue;
7871
7872                 /* We don't lock the tree block, it's OK to be racy here */
7873                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7874                                                wc->level - 1, 1, &refs,
7875                                                &flags);
7876                 /* We don't care about errors in readahead. */
7877                 if (ret < 0)
7878                         continue;
7879                 BUG_ON(refs == 0);
7880
7881                 if (wc->stage == DROP_REFERENCE) {
7882                         if (refs == 1)
7883                                 goto reada;
7884
7885                         if (wc->level == 1 &&
7886                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7887                                 continue;
7888                         if (!wc->update_ref ||
7889                             generation <= root->root_key.offset)
7890                                 continue;
7891                         btrfs_node_key_to_cpu(eb, &key, slot);
7892                         ret = btrfs_comp_cpu_keys(&key,
7893                                                   &wc->update_progress);
7894                         if (ret < 0)
7895                                 continue;
7896                 } else {
7897                         if (wc->level == 1 &&
7898                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7899                                 continue;
7900                 }
7901 reada:
7902                 readahead_tree_block(root, bytenr);
7903                 nread++;
7904         }
7905         wc->reada_slot = slot;
7906 }
7907
7908 /*
7909  * TODO: Modify related function to add related node/leaf to dirty_extent_root,
7910  * for later qgroup accounting.
7911  *
7912  * Currently, this function does nothing.
7913  */
7914 static int account_leaf_items(struct btrfs_trans_handle *trans,
7915                               struct btrfs_root *root,
7916                               struct extent_buffer *eb)
7917 {
7918         int nr = btrfs_header_nritems(eb);
7919         int i, extent_type;
7920         struct btrfs_key key;
7921         struct btrfs_file_extent_item *fi;
7922         u64 bytenr, num_bytes;
7923
7924         for (i = 0; i < nr; i++) {
7925                 btrfs_item_key_to_cpu(eb, &key, i);
7926
7927                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7928                         continue;
7929
7930                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7931                 /* filter out non-qgroup-accountable extents */
7932                 extent_type = btrfs_file_extent_type(eb, fi);
7933
7934                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7935                         continue;
7936
7937                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7938                 if (!bytenr)
7939                         continue;
7940
7941                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7942         }
7943         return 0;
7944 }
7945
7946 /*
7947  * Walk up the tree from the bottom, freeing leaves and any interior
7948  * nodes which have had all slots visited. If a node (leaf or
7949  * interior) is freed, the node above it will have its slot
7950  * incremented. The root node will never be freed.
7951  *
7952  * At the end of this function, we should have a path which has all
7953  * slots incremented to the next position for a search. If we need to
7954  * read a new node it will be NULL and the node above it will have the
7955  * correct slot selected for a later read.
7956  *
7957  * If we increment the root node's slot counter past the number of
7958  * elements, 1 is returned to signal completion of the search.
7959  */
7960 static int adjust_slots_upwards(struct btrfs_root *root,
7961                                 struct btrfs_path *path, int root_level)
7962 {
7963         int level = 0;
7964         int nr, slot;
7965         struct extent_buffer *eb;
7966
7967         if (root_level == 0)
7968                 return 1;
7969
7970         while (level <= root_level) {
7971                 eb = path->nodes[level];
7972                 nr = btrfs_header_nritems(eb);
7973                 path->slots[level]++;
7974                 slot = path->slots[level];
7975                 if (slot >= nr || level == 0) {
7976                         /*
7977                          * Don't free the root - we will detect this
7978                          * condition after our loop and return a
7979                          * positive value for caller to stop walking the tree.
7980                          */
7981                         if (level != root_level) {
7982                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7983                                 path->locks[level] = 0;
7984
7985                                 free_extent_buffer(eb);
7986                                 path->nodes[level] = NULL;
7987                                 path->slots[level] = 0;
7988                         }
7989                 } else {
7990                         /*
7991                          * We have a valid slot to walk back down
7992                          * from. Stop here so caller can process these
7993                          * new nodes.
7994                          */
7995                         break;
7996                 }
7997
7998                 level++;
7999         }
8000
8001         eb = path->nodes[root_level];
8002         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8003                 return 1;
8004
8005         return 0;
8006 }
8007
8008 /*
8009  * root_eb is the subtree root and is locked before this function is called.
8010  * TODO: Modify this function to mark all (including completely shared) nodes
8011  * to dirty_extent_root so they get accounted in qgroup.
8012  */
8013 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8014                                   struct btrfs_root *root,
8015                                   struct extent_buffer *root_eb,
8016                                   u64 root_gen,
8017                                   int root_level)
8018 {
8019         int ret = 0;
8020         int level;
8021         struct extent_buffer *eb = root_eb;
8022         struct btrfs_path *path = NULL;
8023
8024         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8025         BUG_ON(root_eb == NULL);
8026
8027         if (!root->fs_info->quota_enabled)
8028                 return 0;
8029
8030         if (!extent_buffer_uptodate(root_eb)) {
8031                 ret = btrfs_read_buffer(root_eb, root_gen);
8032                 if (ret)
8033                         goto out;
8034         }
8035
8036         if (root_level == 0) {
8037                 ret = account_leaf_items(trans, root, root_eb);
8038                 goto out;
8039         }
8040
8041         path = btrfs_alloc_path();
8042         if (!path)
8043                 return -ENOMEM;
8044
8045         /*
8046          * Walk down the tree.  Missing extent blocks are filled in as
8047          * we go. Metadata is accounted every time we read a new
8048          * extent block.
8049          *
8050          * When we reach a leaf, we account for file extent items in it,
8051          * walk back up the tree (adjusting slot pointers as we go)
8052          * and restart the search process.
8053          */
8054         extent_buffer_get(root_eb); /* For path */
8055         path->nodes[root_level] = root_eb;
8056         path->slots[root_level] = 0;
8057         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8058 walk_down:
8059         level = root_level;
8060         while (level >= 0) {
8061                 if (path->nodes[level] == NULL) {
8062                         int parent_slot;
8063                         u64 child_gen;
8064                         u64 child_bytenr;
8065
8066                         /* We need to get child blockptr/gen from
8067                          * parent before we can read it. */
8068                         eb = path->nodes[level + 1];
8069                         parent_slot = path->slots[level + 1];
8070                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8071                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8072
8073                         eb = read_tree_block(root, child_bytenr, child_gen);
8074                         if (IS_ERR(eb)) {
8075                                 ret = PTR_ERR(eb);
8076                                 goto out;
8077                         } else if (!extent_buffer_uptodate(eb)) {
8078                                 free_extent_buffer(eb);
8079                                 ret = -EIO;
8080                                 goto out;
8081                         }
8082
8083                         path->nodes[level] = eb;
8084                         path->slots[level] = 0;
8085
8086                         btrfs_tree_read_lock(eb);
8087                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8088                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8089                 }
8090
8091                 if (level == 0) {
8092                         ret = account_leaf_items(trans, root, path->nodes[level]);
8093                         if (ret)
8094                                 goto out;
8095
8096                         /* Nonzero return here means we completed our search */
8097                         ret = adjust_slots_upwards(root, path, root_level);
8098                         if (ret)
8099                                 break;
8100
8101                         /* Restart search with new slots */
8102                         goto walk_down;
8103                 }
8104
8105                 level--;
8106         }
8107
8108         ret = 0;
8109 out:
8110         btrfs_free_path(path);
8111
8112         return ret;
8113 }
8114
8115 /*
8116  * helper to process tree block while walking down the tree.
8117  *
8118  * when wc->stage == UPDATE_BACKREF, this function updates
8119  * back refs for pointers in the block.
8120  *
8121  * NOTE: return value 1 means we should stop walking down.
8122  */
8123 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8124                                    struct btrfs_root *root,
8125                                    struct btrfs_path *path,
8126                                    struct walk_control *wc, int lookup_info)
8127 {
8128         int level = wc->level;
8129         struct extent_buffer *eb = path->nodes[level];
8130         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8131         int ret;
8132
8133         if (wc->stage == UPDATE_BACKREF &&
8134             btrfs_header_owner(eb) != root->root_key.objectid)
8135                 return 1;
8136
8137         /*
8138          * when the reference count of a tree block is 1, it won't increase
8139          * again. once the full backref flag is set, we never clear it.
8140          */
8141         if (lookup_info &&
8142             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8143              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8144                 BUG_ON(!path->locks[level]);
8145                 ret = btrfs_lookup_extent_info(trans, root,
8146                                                eb->start, level, 1,
8147                                                &wc->refs[level],
8148                                                &wc->flags[level]);
8149                 BUG_ON(ret == -ENOMEM);
8150                 if (ret)
8151                         return ret;
8152                 BUG_ON(wc->refs[level] == 0);
8153         }
8154
8155         if (wc->stage == DROP_REFERENCE) {
8156                 if (wc->refs[level] > 1)
8157                         return 1;
8158
8159                 if (path->locks[level] && !wc->keep_locks) {
8160                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8161                         path->locks[level] = 0;
8162                 }
8163                 return 0;
8164         }
8165
8166         /* wc->stage == UPDATE_BACKREF */
8167         if (!(wc->flags[level] & flag)) {
8168                 BUG_ON(!path->locks[level]);
8169                 ret = btrfs_inc_ref(trans, root, eb, 1);
8170                 BUG_ON(ret); /* -ENOMEM */
8171                 ret = btrfs_dec_ref(trans, root, eb, 0);
8172                 BUG_ON(ret); /* -ENOMEM */
8173                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8174                                                   eb->len, flag,
8175                                                   btrfs_header_level(eb), 0);
8176                 BUG_ON(ret); /* -ENOMEM */
8177                 wc->flags[level] |= flag;
8178         }
8179
8180         /*
8181          * the block is shared by multiple trees, so it's not good to
8182          * keep the tree lock
8183          */
8184         if (path->locks[level] && level > 0) {
8185                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8186                 path->locks[level] = 0;
8187         }
8188         return 0;
8189 }
8190
8191 /*
8192  * helper to process tree block pointer.
8193  *
8194  * when wc->stage == DROP_REFERENCE, this function checks
8195  * reference count of the block pointed to. if the block
8196  * is shared and we need to update back refs for the subtree
8197  * rooted at the block, this function changes wc->stage to
8198  * UPDATE_BACKREF. if the block is shared and there is no
8199  * need to update back refs, this function drops the reference
8200  * to the block.
8201  *
8202  * NOTE: return value 1 means we should stop walking down.
8203  */
8204 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8205                                  struct btrfs_root *root,
8206                                  struct btrfs_path *path,
8207                                  struct walk_control *wc, int *lookup_info)
8208 {
8209         u64 bytenr;
8210         u64 generation;
8211         u64 parent;
8212         u32 blocksize;
8213         struct btrfs_key key;
8214         struct extent_buffer *next;
8215         int level = wc->level;
8216         int reada = 0;
8217         int ret = 0;
8218         bool need_account = false;
8219
8220         generation = btrfs_node_ptr_generation(path->nodes[level],
8221                                                path->slots[level]);
8222         /*
8223          * if the lower level block was created before the snapshot
8224          * was created, we know there is no need to update back refs
8225          * for the subtree
8226          */
8227         if (wc->stage == UPDATE_BACKREF &&
8228             generation <= root->root_key.offset) {
8229                 *lookup_info = 1;
8230                 return 1;
8231         }
8232
8233         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8234         blocksize = root->nodesize;
8235
8236         next = btrfs_find_tree_block(root->fs_info, bytenr);
8237         if (!next) {
8238                 next = btrfs_find_create_tree_block(root, bytenr);
8239                 if (!next)
8240                         return -ENOMEM;
8241                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8242                                                level - 1);
8243                 reada = 1;
8244         }
8245         btrfs_tree_lock(next);
8246         btrfs_set_lock_blocking(next);
8247
8248         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8249                                        &wc->refs[level - 1],
8250                                        &wc->flags[level - 1]);
8251         if (ret < 0) {
8252                 btrfs_tree_unlock(next);
8253                 return ret;
8254         }
8255
8256         if (unlikely(wc->refs[level - 1] == 0)) {
8257                 btrfs_err(root->fs_info, "Missing references.");
8258                 BUG();
8259         }
8260         *lookup_info = 0;
8261
8262         if (wc->stage == DROP_REFERENCE) {
8263                 if (wc->refs[level - 1] > 1) {
8264                         need_account = true;
8265                         if (level == 1 &&
8266                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8267                                 goto skip;
8268
8269                         if (!wc->update_ref ||
8270                             generation <= root->root_key.offset)
8271                                 goto skip;
8272
8273                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8274                                               path->slots[level]);
8275                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8276                         if (ret < 0)
8277                                 goto skip;
8278
8279                         wc->stage = UPDATE_BACKREF;
8280                         wc->shared_level = level - 1;
8281                 }
8282         } else {
8283                 if (level == 1 &&
8284                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8285                         goto skip;
8286         }
8287
8288         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8289                 btrfs_tree_unlock(next);
8290                 free_extent_buffer(next);
8291                 next = NULL;
8292                 *lookup_info = 1;
8293         }
8294
8295         if (!next) {
8296                 if (reada && level == 1)
8297                         reada_walk_down(trans, root, wc, path);
8298                 next = read_tree_block(root, bytenr, generation);
8299                 if (IS_ERR(next)) {
8300                         return PTR_ERR(next);
8301                 } else if (!extent_buffer_uptodate(next)) {
8302                         free_extent_buffer(next);
8303                         return -EIO;
8304                 }
8305                 btrfs_tree_lock(next);
8306                 btrfs_set_lock_blocking(next);
8307         }
8308
8309         level--;
8310         BUG_ON(level != btrfs_header_level(next));
8311         path->nodes[level] = next;
8312         path->slots[level] = 0;
8313         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8314         wc->level = level;
8315         if (wc->level == 1)
8316                 wc->reada_slot = 0;
8317         return 0;
8318 skip:
8319         wc->refs[level - 1] = 0;
8320         wc->flags[level - 1] = 0;
8321         if (wc->stage == DROP_REFERENCE) {
8322                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8323                         parent = path->nodes[level]->start;
8324                 } else {
8325                         BUG_ON(root->root_key.objectid !=
8326                                btrfs_header_owner(path->nodes[level]));
8327                         parent = 0;
8328                 }
8329
8330                 if (need_account) {
8331                         ret = account_shared_subtree(trans, root, next,
8332                                                      generation, level - 1);
8333                         if (ret) {
8334                                 btrfs_err_rl(root->fs_info,
8335                                         "Error "
8336                                         "%d accounting shared subtree. Quota "
8337                                         "is out of sync, rescan required.",
8338                                         ret);
8339                         }
8340                 }
8341                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8342                                 root->root_key.objectid, level - 1, 0, 0);
8343                 BUG_ON(ret); /* -ENOMEM */
8344         }
8345         btrfs_tree_unlock(next);
8346         free_extent_buffer(next);
8347         *lookup_info = 1;
8348         return 1;
8349 }
8350
8351 /*
8352  * helper to process tree block while walking up the tree.
8353  *
8354  * when wc->stage == DROP_REFERENCE, this function drops
8355  * reference count on the block.
8356  *
8357  * when wc->stage == UPDATE_BACKREF, this function changes
8358  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8359  * to UPDATE_BACKREF previously while processing the block.
8360  *
8361  * NOTE: return value 1 means we should stop walking up.
8362  */
8363 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8364                                  struct btrfs_root *root,
8365                                  struct btrfs_path *path,
8366                                  struct walk_control *wc)
8367 {
8368         int ret;
8369         int level = wc->level;
8370         struct extent_buffer *eb = path->nodes[level];
8371         u64 parent = 0;
8372
8373         if (wc->stage == UPDATE_BACKREF) {
8374                 BUG_ON(wc->shared_level < level);
8375                 if (level < wc->shared_level)
8376                         goto out;
8377
8378                 ret = find_next_key(path, level + 1, &wc->update_progress);
8379                 if (ret > 0)
8380                         wc->update_ref = 0;
8381
8382                 wc->stage = DROP_REFERENCE;
8383                 wc->shared_level = -1;
8384                 path->slots[level] = 0;
8385
8386                 /*
8387                  * check reference count again if the block isn't locked.
8388                  * we should start walking down the tree again if reference
8389                  * count is one.
8390                  */
8391                 if (!path->locks[level]) {
8392                         BUG_ON(level == 0);
8393                         btrfs_tree_lock(eb);
8394                         btrfs_set_lock_blocking(eb);
8395                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8396
8397                         ret = btrfs_lookup_extent_info(trans, root,
8398                                                        eb->start, level, 1,
8399                                                        &wc->refs[level],
8400                                                        &wc->flags[level]);
8401                         if (ret < 0) {
8402                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8403                                 path->locks[level] = 0;
8404                                 return ret;
8405                         }
8406                         BUG_ON(wc->refs[level] == 0);
8407                         if (wc->refs[level] == 1) {
8408                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8409                                 path->locks[level] = 0;
8410                                 return 1;
8411                         }
8412                 }
8413         }
8414
8415         /* wc->stage == DROP_REFERENCE */
8416         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8417
8418         if (wc->refs[level] == 1) {
8419                 if (level == 0) {
8420                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8421                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8422                         else
8423                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8424                         BUG_ON(ret); /* -ENOMEM */
8425                         ret = account_leaf_items(trans, root, eb);
8426                         if (ret) {
8427                                 btrfs_err_rl(root->fs_info,
8428                                         "error "
8429                                         "%d accounting leaf items. Quota "
8430                                         "is out of sync, rescan required.",
8431                                         ret);
8432                         }
8433                 }
8434                 /* make block locked assertion in clean_tree_block happy */
8435                 if (!path->locks[level] &&
8436                     btrfs_header_generation(eb) == trans->transid) {
8437                         btrfs_tree_lock(eb);
8438                         btrfs_set_lock_blocking(eb);
8439                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8440                 }
8441                 clean_tree_block(trans, root->fs_info, eb);
8442         }
8443
8444         if (eb == root->node) {
8445                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8446                         parent = eb->start;
8447                 else
8448                         BUG_ON(root->root_key.objectid !=
8449                                btrfs_header_owner(eb));
8450         } else {
8451                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8452                         parent = path->nodes[level + 1]->start;
8453                 else
8454                         BUG_ON(root->root_key.objectid !=
8455                                btrfs_header_owner(path->nodes[level + 1]));
8456         }
8457
8458         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8459 out:
8460         wc->refs[level] = 0;
8461         wc->flags[level] = 0;
8462         return 0;
8463 }
8464
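     /*
      * Walk down the tree from wc->level toward the leaves.  walk_down_proc()
      * handles the block at the current level and do_walk_down() reads and
      * descends into its children.  Stop at a leaf, when the slots in the
      * current node are exhausted, or when a helper says not to go deeper.
      */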
8465 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8466                                    struct btrfs_root *root,
8467                                    struct btrfs_path *path,
8468                                    struct walk_control *wc)
8469 {
8470         int level = wc->level;
8471         int lookup_info = 1;
8472         int ret;
8473
8474         while (level >= 0) {
8475                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8476                 if (ret > 0)
8477                         break;
8478
8479                 if (level == 0)
8480                         break;
8481
8482                 if (path->slots[level] >=
8483                     btrfs_header_nritems(path->nodes[level]))
8484                         break;
8485
8486                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8487                 if (ret > 0) {
8488                         path->slots[level]++;
8489                         continue;
8490                 } else if (ret < 0)
8491                         return ret;
8492                 level = wc->level;
8493         }
8494         return 0;
8495 }
8496
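     /*
      * Walk back up the tree, processing (and possibly freeing) each node
      * whose slots have been exhausted via walk_up_proc().  Returns 0 when
      * there is more of the tree left to walk down into and 1 once every
      * level below max_level has been finished.
      */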
8497 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8498                                  struct btrfs_root *root,
8499                                  struct btrfs_path *path,
8500                                  struct walk_control *wc, int max_level)
8501 {
8502         int level = wc->level;
8503         int ret;
8504
8505         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8506         while (level < max_level && path->nodes[level]) {
8507                 wc->level = level;
8508                 if (path->slots[level] + 1 <
8509                     btrfs_header_nritems(path->nodes[level])) {
8510                         path->slots[level]++;
8511                         return 0;
8512                 } else {
8513                         ret = walk_up_proc(trans, root, path, wc);
8514                         if (ret > 0)
8515                                 return 0;
8516
8517                         if (path->locks[level]) {
8518                                 btrfs_tree_unlock_rw(path->nodes[level],
8519                                                      path->locks[level]);
8520                                 path->locks[level] = 0;
8521                         }
8522                         free_extent_buffer(path->nodes[level]);
8523                         path->nodes[level] = NULL;
8524                         level++;
8525                 }
8526         }
8527         return 1;
8528 }
8529
8530 /*
8531  * drop a subvolume tree.
8532  *
8533  * this function traverses the tree freeing any blocks that are only
8534  * referenced by the tree.
8535  *
8536  * when a shared tree block is found, this function decreases its
8537  * reference count by one. if update_ref is true, this function
8538  * also makes sure backrefs for the shared block and all lower level
8539  * blocks are properly updated.
8540  *
8541  * If called with for_reloc == 0, may exit early with -EAGAIN
8542  */
8543 int btrfs_drop_snapshot(struct btrfs_root *root,
8544                          struct btrfs_block_rsv *block_rsv, int update_ref,
8545                          int for_reloc)
8546 {
8547         struct btrfs_path *path;
8548         struct btrfs_trans_handle *trans;
8549         struct btrfs_root *tree_root = root->fs_info->tree_root;
8550         struct btrfs_root_item *root_item = &root->root_item;
8551         struct walk_control *wc;
8552         struct btrfs_key key;
8553         int err = 0;
8554         int ret;
8555         int level;
8556         bool root_dropped = false;
8557
8558         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8559
8560         path = btrfs_alloc_path();
8561         if (!path) {
8562                 err = -ENOMEM;
8563                 goto out;
8564         }
8565
8566         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8567         if (!wc) {
8568                 btrfs_free_path(path);
8569                 err = -ENOMEM;
8570                 goto out;
8571         }
8572
8573         trans = btrfs_start_transaction(tree_root, 0);
8574         if (IS_ERR(trans)) {
8575                 err = PTR_ERR(trans);
8576                 goto out_free;
8577         }
8578
8579         if (block_rsv)
8580                 trans->block_rsv = block_rsv;
8581
8582         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8583                 level = btrfs_header_level(root->node);
8584                 path->nodes[level] = btrfs_lock_root_node(root);
8585                 btrfs_set_lock_blocking(path->nodes[level]);
8586                 path->slots[level] = 0;
8587                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8588                 memset(&wc->update_progress, 0,
8589                        sizeof(wc->update_progress));
8590         } else {
8591                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8592                 memcpy(&wc->update_progress, &key,
8593                        sizeof(wc->update_progress));
8594
8595                 level = root_item->drop_level;
8596                 BUG_ON(level == 0);
8597                 path->lowest_level = level;
8598                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8599                 path->lowest_level = 0;
8600                 if (ret < 0) {
8601                         err = ret;
8602                         goto out_end_trans;
8603                 }
8604                 WARN_ON(ret > 0);
8605
8606                 /*
8607                  * unlock our path, this is safe because only this
8608                  * function is allowed to delete this snapshot
8609                  */
8610                 btrfs_unlock_up_safe(path, 0);
8611
8612                 level = btrfs_header_level(root->node);
8613                 while (1) {
8614                         btrfs_tree_lock(path->nodes[level]);
8615                         btrfs_set_lock_blocking(path->nodes[level]);
8616                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8617
8618                         ret = btrfs_lookup_extent_info(trans, root,
8619                                                 path->nodes[level]->start,
8620                                                 level, 1, &wc->refs[level],
8621                                                 &wc->flags[level]);
8622                         if (ret < 0) {
8623                                 err = ret;
8624                                 goto out_end_trans;
8625                         }
8626                         BUG_ON(wc->refs[level] == 0);
8627
8628                         if (level == root_item->drop_level)
8629                                 break;
8630
8631                         btrfs_tree_unlock(path->nodes[level]);
8632                         path->locks[level] = 0;
8633                         WARN_ON(wc->refs[level] != 1);
8634                         level--;
8635                 }
8636         }
8637
8638         wc->level = level;
8639         wc->shared_level = -1;
8640         wc->stage = DROP_REFERENCE;
8641         wc->update_ref = update_ref;
8642         wc->keep_locks = 0;
8643         wc->for_reloc = for_reloc;
8644         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8645
8646         while (1) {
8647
8648                 ret = walk_down_tree(trans, root, path, wc);
8649                 if (ret < 0) {
8650                         err = ret;
8651                         break;
8652                 }
8653
8654                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8655                 if (ret < 0) {
8656                         err = ret;
8657                         break;
8658                 }
8659
8660                 if (ret > 0) {
8661                         BUG_ON(wc->stage != DROP_REFERENCE);
8662                         break;
8663                 }
8664
8665                 if (wc->stage == DROP_REFERENCE) {
8666                         level = wc->level;
8667                         btrfs_node_key(path->nodes[level],
8668                                        &root_item->drop_progress,
8669                                        path->slots[level]);
8670                         root_item->drop_level = level;
8671                 }
8672
8673                 BUG_ON(wc->level == 0);
8674                 if (btrfs_should_end_transaction(trans, tree_root) ||
8675                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8676                         ret = btrfs_update_root(trans, tree_root,
8677                                                 &root->root_key,
8678                                                 root_item);
8679                         if (ret) {
8680                                 btrfs_abort_transaction(trans, tree_root, ret);
8681                                 err = ret;
8682                                 goto out_end_trans;
8683                         }
8684
8685                         btrfs_end_transaction_throttle(trans, tree_root);
8686                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8687                                 pr_debug("BTRFS: drop snapshot early exit\n");
8688                                 err = -EAGAIN;
8689                                 goto out_free;
8690                         }
8691
8692                         trans = btrfs_start_transaction(tree_root, 0);
8693                         if (IS_ERR(trans)) {
8694                                 err = PTR_ERR(trans);
8695                                 goto out_free;
8696                         }
8697                         if (block_rsv)
8698                                 trans->block_rsv = block_rsv;
8699                 }
8700         }
8701         btrfs_release_path(path);
8702         if (err)
8703                 goto out_end_trans;
8704
8705         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8706         if (ret) {
8707                 btrfs_abort_transaction(trans, tree_root, ret);
8708                 goto out_end_trans;
8709         }
8710
8711         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8712                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8713                                       NULL, NULL);
8714                 if (ret < 0) {
8715                         btrfs_abort_transaction(trans, tree_root, ret);
8716                         err = ret;
8717                         goto out_end_trans;
8718                 } else if (ret > 0) {
8719                         /* if we fail to delete the orphan item this time
8720                          * around, it'll get picked up the next time.
8721                          *
8722                          * The most common failure here is just -ENOENT.
8723                          */
8724                         btrfs_del_orphan_item(trans, tree_root,
8725                                               root->root_key.objectid);
8726                 }
8727         }
8728
8729         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8730                 btrfs_add_dropped_root(trans, root);
8731         } else {
8732                 free_extent_buffer(root->node);
8733                 free_extent_buffer(root->commit_root);
8734                 btrfs_put_fs_root(root);
8735         }
8736         root_dropped = true;
8737 out_end_trans:
8738         btrfs_end_transaction_throttle(trans, tree_root);
8739 out_free:
8740         kfree(wc);
8741         btrfs_free_path(path);
8742 out:
8743         /*
8744          * So if we need to stop dropping the snapshot for whatever reason we
8745          * need to make sure to add it back to the dead root list so that we
8746          * keep trying to do the work later.  This also cleans up roots if we
8747          * don't have it in the radix (like when we recover after a power fail
8748          * or unmount) so we don't leak memory.
8749          */
8750         if (!for_reloc && root_dropped == false)
8751                 btrfs_add_dead_root(root);
8752         if (err && err != -EAGAIN)
8753                 btrfs_std_error(root->fs_info, err, NULL);
8754         return err;
8755 }
8756
8757 /*
8758  * drop subtree rooted at tree block 'node'.
8759  *
8760  * NOTE: this function will unlock and release tree block 'node'.
8761  * It is only used by the relocation code.
8762  */
8763 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8764                         struct btrfs_root *root,
8765                         struct extent_buffer *node,
8766                         struct extent_buffer *parent)
8767 {
8768         struct btrfs_path *path;
8769         struct walk_control *wc;
8770         int level;
8771         int parent_level;
8772         int ret = 0;
8773         int wret;
8774
8775         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8776
8777         path = btrfs_alloc_path();
8778         if (!path)
8779                 return -ENOMEM;
8780
8781         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8782         if (!wc) {
8783                 btrfs_free_path(path);
8784                 return -ENOMEM;
8785         }
8786
8787         btrfs_assert_tree_locked(parent);
8788         parent_level = btrfs_header_level(parent);
8789         extent_buffer_get(parent);
8790         path->nodes[parent_level] = parent;
8791         path->slots[parent_level] = btrfs_header_nritems(parent);
8792
8793         btrfs_assert_tree_locked(node);
8794         level = btrfs_header_level(node);
8795         path->nodes[level] = node;
8796         path->slots[level] = 0;
8797         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8798
8799         wc->refs[parent_level] = 1;
8800         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8801         wc->level = level;
8802         wc->shared_level = -1;
8803         wc->stage = DROP_REFERENCE;
8804         wc->update_ref = 0;
8805         wc->keep_locks = 1;
8806         wc->for_reloc = 1;
8807         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8808
8809         while (1) {
8810                 wret = walk_down_tree(trans, root, path, wc);
8811                 if (wret < 0) {
8812                         ret = wret;
8813                         break;
8814                 }
8815
8816                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8817                 if (wret < 0)
8818                         ret = wret;
8819                 if (wret != 0)
8820                         break;
8821         }
8822
8823         kfree(wc);
8824         btrfs_free_path(path);
8825         return ret;
8826 }
8827
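     /*
      * Pick the chunk profile to use in place of @flags: honour a restripe
      * target if one is configured, otherwise adjust the RAID level to what
      * the current number of writable devices can support (e.g. RAID1 ->
      * DUP on a single device, DUP -> RAID1 once more devices are present).
      */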
8828 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8829 {
8830         u64 num_devices;
8831         u64 stripped;
8832
8833         /*
8834  * if restripe for this chunk_type is on, pick the target profile and
8835  * return; otherwise do the usual balance
8836          */
8837         stripped = get_restripe_target(root->fs_info, flags);
8838         if (stripped)
8839                 return extended_to_chunk(stripped);
8840
8841         num_devices = root->fs_info->fs_devices->rw_devices;
8842
8843         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8844                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8845                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8846
8847         if (num_devices == 1) {
8848                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8849                 stripped = flags & ~stripped;
8850
8851                 /* turn raid0 into single device chunks */
8852                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8853                         return stripped;
8854
8855                 /* turn mirroring into duplication */
8856                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8857                              BTRFS_BLOCK_GROUP_RAID10))
8858                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8859         } else {
8860                 /* they already had raid on here, just return */
8861                 if (flags & stripped)
8862                         return flags;
8863
8864                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8865                 stripped = flags & ~stripped;
8866
8867                 /* switch duplicated blocks with raid1 */
8868                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8869                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8870
8871                 /* this is drive concat, leave it alone */
8872         }
8873
8874         return flags;
8875 }
8876
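     /*
      * Try to mark a block group read-only.  Its unused bytes are added to
      * sinfo->bytes_readonly, but only if the space_info can still fit them
      * (plus a small reserve for metadata/system groups unless @force is
      * set).  Returns 0 on success and -ENOSPC otherwise.
      */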
8877 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8878 {
8879         struct btrfs_space_info *sinfo = cache->space_info;
8880         u64 num_bytes;
8881         u64 min_allocable_bytes;
8882         int ret = -ENOSPC;
8883
8884         /*
8885          * We need some metadata space and system metadata space for
8886          * allocating chunks in some corner cases, so keep a minimum of
8887          * free space unless we are forced to mark the group read-only.
8888          */
8889         if ((sinfo->flags &
8890              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8891             !force)
8892                 min_allocable_bytes = 1 * 1024 * 1024;
8893         else
8894                 min_allocable_bytes = 0;
8895
8896         spin_lock(&sinfo->lock);
8897         spin_lock(&cache->lock);
8898
8899         if (cache->ro) {
8900                 cache->ro++;
8901                 ret = 0;
8902                 goto out;
8903         }
8904
8905         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8906                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8907
8908         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8909             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8910             min_allocable_bytes <= sinfo->total_bytes) {
8911                 sinfo->bytes_readonly += num_bytes;
8912                 cache->ro++;
8913                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8914                 ret = 0;
8915         }
8916 out:
8917         spin_unlock(&cache->lock);
8918         spin_unlock(&sinfo->lock);
8919         return ret;
8920 }
8921
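     /*
      * Make a block group read-only.  If the raid profile for new chunks has
      * changed, or the space_info is too full to absorb the group's free
      * space, a new chunk is allocated first; then inc_block_group_ro() does
      * the actual accounting.
      */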
8922 int btrfs_inc_block_group_ro(struct btrfs_root *root,
8923                              struct btrfs_block_group_cache *cache)
8924
8925 {
8926         struct btrfs_trans_handle *trans;
8927         u64 alloc_flags;
8928         int ret;
8929
8930 again:
8931         trans = btrfs_join_transaction(root);
8932         if (IS_ERR(trans))
8933                 return PTR_ERR(trans);
8934
8935         /*
8936          * we're not allowed to set block groups read-only after the dirty
8937          * block groups cache has started writing.  If it has already started,
8938          * back off and let this transaction commit
8939          */
8940         mutex_lock(&root->fs_info->ro_block_group_mutex);
8941         if (trans->transaction->dirty_bg_run) {
8942                 u64 transid = trans->transid;
8943
8944                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8945                 btrfs_end_transaction(trans, root);
8946
8947                 ret = btrfs_wait_for_commit(root, transid);
8948                 if (ret)
8949                         return ret;
8950                 goto again;
8951         }
8952
8953         /*
8954          * if we are changing raid levels, try to allocate a corresponding
8955          * block group with the new raid level.
8956          */
8957         alloc_flags = update_block_group_flags(root, cache->flags);
8958         if (alloc_flags != cache->flags) {
8959                 ret = do_chunk_alloc(trans, root, alloc_flags,
8960                                      CHUNK_ALLOC_FORCE);
8961                 /*
8962                  * ENOSPC is allowed here, we may have enough space
8963                  * already allocated at the new raid level to
8964                  * carry on
8965                  */
8966                 if (ret == -ENOSPC)
8967                         ret = 0;
8968                 if (ret < 0)
8969                         goto out;
8970         }
8971
8972         ret = inc_block_group_ro(cache, 0);
8973         if (!ret)
8974                 goto out;
8975         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8976         ret = do_chunk_alloc(trans, root, alloc_flags,
8977                              CHUNK_ALLOC_FORCE);
8978         if (ret < 0)
8979                 goto out;
8980         ret = inc_block_group_ro(cache, 0);
8981 out:
8982         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8983                 alloc_flags = update_block_group_flags(root, cache->flags);
8984                 lock_chunks(root->fs_info->chunk_root);
8985                 check_system_chunk(trans, root, alloc_flags);
8986                 unlock_chunks(root->fs_info->chunk_root);
8987         }
8988         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8989
8990         btrfs_end_transaction(trans, root);
8991         return ret;
8992 }
8993
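     /* Force allocation of a new chunk using the current profile for @type. */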
8994 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8995                             struct btrfs_root *root, u64 type)
8996 {
8997         u64 alloc_flags = get_alloc_profile(root, type);
8998         return do_chunk_alloc(trans, root, alloc_flags,
8999                               CHUNK_ALLOC_FORCE);
9000 }
9001
9002 /*
9003  * helper to account the unused space of all the readonly block groups in the
9004  * space_info. takes mirrors into account.
9005  */
9006 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9007 {
9008         struct btrfs_block_group_cache *block_group;
9009         u64 free_bytes = 0;
9010         int factor;
9011
9012         /* It's df, we don't care if it's racy */
9013         if (list_empty(&sinfo->ro_bgs))
9014                 return 0;
9015
9016         spin_lock(&sinfo->lock);
9017         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9018                 spin_lock(&block_group->lock);
9019
9020                 if (!block_group->ro) {
9021                         spin_unlock(&block_group->lock);
9022                         continue;
9023                 }
9024
9025                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9026                                           BTRFS_BLOCK_GROUP_RAID10 |
9027                                           BTRFS_BLOCK_GROUP_DUP))
9028                         factor = 2;
9029                 else
9030                         factor = 1;
9031
9032                 free_bytes += (block_group->key.offset -
9033                                btrfs_block_group_used(&block_group->item)) *
9034                                factor;
9035
9036                 spin_unlock(&block_group->lock);
9037         }
9038         spin_unlock(&sinfo->lock);
9039
9040         return free_bytes;
9041 }
9042
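     /*
      * Drop one read-only reference on a block group.  When the last one is
      * gone, give the group's unused bytes back to the space_info and take
      * it off the ro_bgs list.
      */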
9043 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9044                               struct btrfs_block_group_cache *cache)
9045 {
9046         struct btrfs_space_info *sinfo = cache->space_info;
9047         u64 num_bytes;
9048
9049         BUG_ON(!cache->ro);
9050
9051         spin_lock(&sinfo->lock);
9052         spin_lock(&cache->lock);
9053         if (!--cache->ro) {
9054                 num_bytes = cache->key.offset - cache->reserved -
9055                             cache->pinned - cache->bytes_super -
9056                             btrfs_block_group_used(&cache->item);
9057                 sinfo->bytes_readonly -= num_bytes;
9058                 list_del_init(&cache->ro_list);
9059         }
9060         spin_unlock(&cache->lock);
9061         spin_unlock(&sinfo->lock);
9062 }
9063
9064 /*
9065  * checks to see if it's even possible to relocate this block group.
9066  *
9067  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9068  * ok to go ahead and try.
9069  */
9070 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9071 {
9072         struct btrfs_block_group_cache *block_group;
9073         struct btrfs_space_info *space_info;
9074         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9075         struct btrfs_device *device;
9076         struct btrfs_trans_handle *trans;
9077         u64 min_free;
9078         u64 dev_min = 1;
9079         u64 dev_nr = 0;
9080         u64 target;
9081         int index;
9082         int full = 0;
9083         int ret = 0;
9084
9085         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9086
9087         /* odd, couldn't find the block group, leave it alone */
9088         if (!block_group)
9089                 return -1;
9090
9091         min_free = btrfs_block_group_used(&block_group->item);
9092
9093         /* no bytes used, we're good */
9094         if (!min_free)
9095                 goto out;
9096
9097         space_info = block_group->space_info;
9098         spin_lock(&space_info->lock);
9099
9100         full = space_info->full;
9101
9102         /*
9103          * if this is the last block group we have in this space, we can't
9104          * relocate it unless we're able to allocate a new chunk below.
9105          *
9106          * Otherwise, we need to make sure we have room in the space to handle
9107          * all of the extents from this block group.  If we can, we're good
9108          */
9109         if ((space_info->total_bytes != block_group->key.offset) &&
9110             (space_info->bytes_used + space_info->bytes_reserved +
9111              space_info->bytes_pinned + space_info->bytes_readonly +
9112              min_free < space_info->total_bytes)) {
9113                 spin_unlock(&space_info->lock);
9114                 goto out;
9115         }
9116         spin_unlock(&space_info->lock);
9117
9118         /*
9119          * ok we don't have enough space, but maybe we have free space on our
9120          * devices to allocate new chunks for relocation, so loop through our
9121          * alloc devices and guess if we have enough space.  if this block
9122          * group is going to be restriped, run checks against the target
9123          * profile instead of the current one.
9124          */
9125         ret = -1;
9126
9127         /*
9128          * index:
9129          *      0: raid10
9130          *      1: raid1
9131          *      2: dup
9132          *      3: raid0
9133          *      4: single
9134          */
9135         target = get_restripe_target(root->fs_info, block_group->flags);
9136         if (target) {
9137                 index = __get_raid_index(extended_to_chunk(target));
9138         } else {
9139                 /*
9140                  * this is just a balance, so if we were marked as full
9141                  * we know there is no space for a new chunk
9142                  */
9143                 if (full)
9144                         goto out;
9145
9146                 index = get_block_group_index(block_group);
9147         }
9148
9149         if (index == BTRFS_RAID_RAID10) {
9150                 dev_min = 4;
9151                 /* Divide by 2 */
9152                 min_free >>= 1;
9153         } else if (index == BTRFS_RAID_RAID1) {
9154                 dev_min = 2;
9155         } else if (index == BTRFS_RAID_DUP) {
9156                 /* Multiply by 2 */
9157                 min_free <<= 1;
9158         } else if (index == BTRFS_RAID_RAID0) {
9159                 dev_min = fs_devices->rw_devices;
9160                 min_free = div64_u64(min_free, dev_min);
9161         }
9162
9163         /* We need to do this so that we can look at pending chunks */
9164         trans = btrfs_join_transaction(root);
9165         if (IS_ERR(trans)) {
9166                 ret = PTR_ERR(trans);
9167                 goto out;
9168         }
9169
9170         mutex_lock(&root->fs_info->chunk_mutex);
9171         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9172                 u64 dev_offset;
9173
9174                 /*
9175                  * check to make sure we can actually find a chunk with enough
9176                  * space to fit our block group in.
9177                  */
9178                 if (device->total_bytes > device->bytes_used + min_free &&
9179                     !device->is_tgtdev_for_dev_replace) {
9180                         ret = find_free_dev_extent(trans, device, min_free,
9181                                                    &dev_offset, NULL);
9182                         if (!ret)
9183                                 dev_nr++;
9184
9185                         if (dev_nr >= dev_min)
9186                                 break;
9187
9188                         ret = -1;
9189                 }
9190         }
9191         mutex_unlock(&root->fs_info->chunk_mutex);
9192         btrfs_end_transaction(trans, root);
9193 out:
9194         btrfs_put_block_group(block_group);
9195         return ret;
9196 }
9197
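     /*
      * Position @path at the first block group item whose objectid is >=
      * key->objectid.  Returns 0 if one is found, a positive value if we
      * ran off the end of the tree, or a negative errno on error.
      */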
9198 static int find_first_block_group(struct btrfs_root *root,
9199                 struct btrfs_path *path, struct btrfs_key *key)
9200 {
9201         int ret = 0;
9202         struct btrfs_key found_key;
9203         struct extent_buffer *leaf;
9204         int slot;
9205
9206         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9207         if (ret < 0)
9208                 goto out;
9209
9210         while (1) {
9211                 slot = path->slots[0];
9212                 leaf = path->nodes[0];
9213                 if (slot >= btrfs_header_nritems(leaf)) {
9214                         ret = btrfs_next_leaf(root, path);
9215                         if (ret == 0)
9216                                 continue;
9217                         if (ret < 0)
9218                                 goto out;
9219                         break;
9220                 }
9221                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9222
9223                 if (found_key.objectid >= key->objectid &&
9224                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9225                         ret = 0;
9226                         goto out;
9227                 }
9228                 path->slots[0]++;
9229         }
9230 out:
9231         return ret;
9232 }
9233
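     /*
      * Release the free space cache inodes that the block groups are still
      * holding a reference to (block_group->inode).
      */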
9234 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9235 {
9236         struct btrfs_block_group_cache *block_group;
9237         u64 last = 0;
9238
9239         while (1) {
9240                 struct inode *inode;
9241
9242                 block_group = btrfs_lookup_first_block_group(info, last);
9243                 while (block_group) {
9244                         spin_lock(&block_group->lock);
9245                         if (block_group->iref)
9246                                 break;
9247                         spin_unlock(&block_group->lock);
9248                         block_group = next_block_group(info->tree_root,
9249                                                        block_group);
9250                 }
9251                 if (!block_group) {
9252                         if (last == 0)
9253                                 break;
9254                         last = 0;
9255                         continue;
9256                 }
9257
9258                 inode = block_group->inode;
9259                 block_group->iref = 0;
9260                 block_group->inode = NULL;
9261                 spin_unlock(&block_group->lock);
9262                 iput(inode);
9263                 last = block_group->key.objectid + block_group->key.offset;
9264                 btrfs_put_block_group(block_group);
9265         }
9266 }
9267
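     /*
      * Free every block group cache and space_info structure.  As noted
      * below, this only runs in the final stages of unmount, so nothing
      * else can be using them.
      */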
9268 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9269 {
9270         struct btrfs_block_group_cache *block_group;
9271         struct btrfs_space_info *space_info;
9272         struct btrfs_caching_control *caching_ctl;
9273         struct rb_node *n;
9274
9275         down_write(&info->commit_root_sem);
9276         while (!list_empty(&info->caching_block_groups)) {
9277                 caching_ctl = list_entry(info->caching_block_groups.next,
9278                                          struct btrfs_caching_control, list);
9279                 list_del(&caching_ctl->list);
9280                 put_caching_control(caching_ctl);
9281         }
9282         up_write(&info->commit_root_sem);
9283
9284         spin_lock(&info->unused_bgs_lock);
9285         while (!list_empty(&info->unused_bgs)) {
9286                 block_group = list_first_entry(&info->unused_bgs,
9287                                                struct btrfs_block_group_cache,
9288                                                bg_list);
9289                 list_del_init(&block_group->bg_list);
9290                 btrfs_put_block_group(block_group);
9291         }
9292         spin_unlock(&info->unused_bgs_lock);
9293
9294         spin_lock(&info->block_group_cache_lock);
9295         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9296                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9297                                        cache_node);
9298                 rb_erase(&block_group->cache_node,
9299                          &info->block_group_cache_tree);
9300                 RB_CLEAR_NODE(&block_group->cache_node);
9301                 spin_unlock(&info->block_group_cache_lock);
9302
9303                 down_write(&block_group->space_info->groups_sem);
9304                 list_del(&block_group->list);
9305                 up_write(&block_group->space_info->groups_sem);
9306
9307                 if (block_group->cached == BTRFS_CACHE_STARTED)
9308                         wait_block_group_cache_done(block_group);
9309
9310                 /*
9311                  * We haven't cached this block group, which means we could
9312                  * possibly have excluded extents on this block group.
9313                  */
9314                 if (block_group->cached == BTRFS_CACHE_NO ||
9315                     block_group->cached == BTRFS_CACHE_ERROR)
9316                         free_excluded_extents(info->extent_root, block_group);
9317
9318                 btrfs_remove_free_space_cache(block_group);
9319                 btrfs_put_block_group(block_group);
9320
9321                 spin_lock(&info->block_group_cache_lock);
9322         }
9323         spin_unlock(&info->block_group_cache_lock);
9324
9325         /*
9326          * Now that all the block groups are freed, go through and free all the
9327          * space_info structs.  This is only called during the final stages of
9328          * unmount, and so we know nobody is using them.  We call
9329          * synchronize_rcu() once before we start, just to be on the safe side.
9330          */
9331         synchronize_rcu();
9332
9333         release_global_block_rsv(info);
9334
9335         while (!list_empty(&info->space_info)) {
9336                 int i;
9337
9338                 space_info = list_entry(info->space_info.next,
9339                                         struct btrfs_space_info,
9340                                         list);
9341                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9342                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9343                             space_info->bytes_reserved > 0 ||
9344                             space_info->bytes_may_use > 0)) {
9345                                 dump_space_info(space_info, 0, 0);
9346                         }
9347                 }
9348                 list_del(&space_info->list);
9349                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9350                         struct kobject *kobj;
9351                         kobj = space_info->block_group_kobjs[i];
9352                         space_info->block_group_kobjs[i] = NULL;
9353                         if (kobj) {
9354                                 kobject_del(kobj);
9355                                 kobject_put(kobj);
9356                         }
9357                 }
9358                 kobject_del(&space_info->kobj);
9359                 kobject_put(&space_info->kobj);
9360         }
9361         return 0;
9362 }
9363
9364 static void __link_block_group(struct btrfs_space_info *space_info,
9365                                struct btrfs_block_group_cache *cache)
9366 {
9367         int index = get_block_group_index(cache);
9368         bool first = false;
9369
9370         down_write(&space_info->groups_sem);
9371         if (list_empty(&space_info->block_groups[index]))
9372                 first = true;
9373         list_add_tail(&cache->list, &space_info->block_groups[index]);
9374         up_write(&space_info->groups_sem);
9375
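             /*
              * First block group of this raid type in the space_info: create
              * the per-raid-type kobject under the space_info's sysfs directory.
              */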
9376         if (first) {
9377                 struct raid_kobject *rkobj;
9378                 int ret;
9379
9380                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9381                 if (!rkobj)
9382                         goto out_err;
9383                 rkobj->raid_type = index;
9384                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9385                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9386                                   "%s", get_raid_name(index));
9387                 if (ret) {
9388                         kobject_put(&rkobj->kobj);
9389                         goto out_err;
9390                 }
9391                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9392         }
9393
9394         return;
9395 out_err:
9396         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9397 }
9398
9399 static struct btrfs_block_group_cache *
9400 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9401 {
9402         struct btrfs_block_group_cache *cache;
9403
9404         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9405         if (!cache)
9406                 return NULL;
9407
9408         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9409                                         GFP_NOFS);
9410         if (!cache->free_space_ctl) {
9411                 kfree(cache);
9412                 return NULL;
9413         }
9414
9415         cache->key.objectid = start;
9416         cache->key.offset = size;
9417         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9418
9419         cache->sectorsize = root->sectorsize;
9420         cache->fs_info = root->fs_info;
9421         cache->full_stripe_len = btrfs_full_stripe_len(root,
9422                                                &root->fs_info->mapping_tree,
9423                                                start);
9424         atomic_set(&cache->count, 1);
9425         spin_lock_init(&cache->lock);
9426         init_rwsem(&cache->data_rwsem);
9427         INIT_LIST_HEAD(&cache->list);
9428         INIT_LIST_HEAD(&cache->cluster_list);
9429         INIT_LIST_HEAD(&cache->bg_list);
9430         INIT_LIST_HEAD(&cache->ro_list);
9431         INIT_LIST_HEAD(&cache->dirty_list);
9432         INIT_LIST_HEAD(&cache->io_list);
9433         btrfs_init_free_space_ctl(cache);
9434         atomic_set(&cache->trimming, 0);
9435
9436         return cache;
9437 }
9438
9439 int btrfs_read_block_groups(struct btrfs_root *root)
9440 {
9441         struct btrfs_path *path;
9442         int ret;
9443         struct btrfs_block_group_cache *cache;
9444         struct btrfs_fs_info *info = root->fs_info;
9445         struct btrfs_space_info *space_info;
9446         struct btrfs_key key;
9447         struct btrfs_key found_key;
9448         struct extent_buffer *leaf;
9449         int need_clear = 0;
9450         u64 cache_gen;
9451
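             /* Block group items are stored in the extent tree. */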
9452         root = info->extent_root;
9453         key.objectid = 0;
9454         key.offset = 0;
9455         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9456         path = btrfs_alloc_path();
9457         if (!path)
9458                 return -ENOMEM;
9459         path->reada = 1;
9460
9461         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9462         if (btrfs_test_opt(root, SPACE_CACHE) &&
9463             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9464                 need_clear = 1;
9465         if (btrfs_test_opt(root, CLEAR_CACHE))
9466                 need_clear = 1;
9467
9468         while (1) {
9469                 ret = find_first_block_group(root, path, &key);
9470                 if (ret > 0)
9471                         break;
9472                 if (ret != 0)
9473                         goto error;
9474
9475                 leaf = path->nodes[0];
9476                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9477
9478                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9479                                                        found_key.offset);
9480                 if (!cache) {
9481                         ret = -ENOMEM;
9482                         goto error;
9483                 }
9484
9485                 if (need_clear) {
9486                         /*
9487                          * When we mount with an old space cache, we need to
9488                          * set BTRFS_DC_CLEAR and set the dirty flag.
9489                          *
9490                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9491                          *    truncate the old free space cache inode and
9492                          *    set up a new one.
9493                          * b) Setting the dirty flag makes sure that we flush
9494                          *    the new space cache info onto disk.
9495                          */
9496                         if (btrfs_test_opt(root, SPACE_CACHE))
9497                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9498                 }
9499
9500                 read_extent_buffer(leaf, &cache->item,
9501                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9502                                    sizeof(cache->item));
9503                 cache->flags = btrfs_block_group_flags(&cache->item);
9504
9505                 key.objectid = found_key.objectid + found_key.offset;
9506                 btrfs_release_path(path);
9507
9508                 /*
9509                  * We need to exclude the super stripes now so that the space
9510                  * info has super bytes accounted for, otherwise we'll think
9511                  * we have more space than we actually do.
9512                  */
9513                 ret = exclude_super_stripes(root, cache);
9514                 if (ret) {
9515                         /*
9516                          * We may have excluded something, so call this just in
9517                          * case.
9518                          */
9519                         free_excluded_extents(root, cache);
9520                         btrfs_put_block_group(cache);
9521                         goto error;
9522                 }
9523
9524                 /*
9525                  * Check for two cases: either we are full, and therefore
9526                  * don't need to bother with the caching work since we won't
9527                  * find any space, or we are empty, and we can just add all
9528                  * the space in and be done with it.  This saves us a lot of
9529                  * time, particularly in the full case.
9530                  */
9531                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9532                         cache->last_byte_to_unpin = (u64)-1;
9533                         cache->cached = BTRFS_CACHE_FINISHED;
9534                         free_excluded_extents(root, cache);
9535                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9536                         cache->last_byte_to_unpin = (u64)-1;
9537                         cache->cached = BTRFS_CACHE_FINISHED;
9538                         add_new_free_space(cache, root->fs_info,
9539                                            found_key.objectid,
9540                                            found_key.objectid +
9541                                            found_key.offset);
9542                         free_excluded_extents(root, cache);
9543                 }
9544
9545                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9546                 if (ret) {
9547                         btrfs_remove_free_space_cache(cache);
9548                         btrfs_put_block_group(cache);
9549                         goto error;
9550                 }
9551
9552                 ret = update_space_info(info, cache->flags, found_key.offset,
9553                                         btrfs_block_group_used(&cache->item),
9554                                         &space_info);
9555                 if (ret) {
9556                         btrfs_remove_free_space_cache(cache);
9557                         spin_lock(&info->block_group_cache_lock);
9558                         rb_erase(&cache->cache_node,
9559                                  &info->block_group_cache_tree);
9560                         RB_CLEAR_NODE(&cache->cache_node);
9561                         spin_unlock(&info->block_group_cache_lock);
9562                         btrfs_put_block_group(cache);
9563                         goto error;
9564                 }
9565
9566                 cache->space_info = space_info;
9567                 spin_lock(&cache->space_info->lock);
9568                 cache->space_info->bytes_readonly += cache->bytes_super;
9569                 spin_unlock(&cache->space_info->lock);
9570
9571                 __link_block_group(space_info, cache);
9572
9573                 set_avail_alloc_bits(root->fs_info, cache->flags);
9574                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9575                         inc_block_group_ro(cache, 1);
9576                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9577                         spin_lock(&info->unused_bgs_lock);
9578                         /* Should always be true but just in case. */
9579                         if (list_empty(&cache->bg_list)) {
9580                                 btrfs_get_block_group(cache);
9581                                 list_add_tail(&cache->bg_list,
9582                                               &info->unused_bgs);
9583                         }
9584                         spin_unlock(&info->unused_bgs_lock);
9585                 }
9586         }
9587
9588         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9589                 if (!(get_alloc_profile(root, space_info->flags) &
9590                       (BTRFS_BLOCK_GROUP_RAID10 |
9591                        BTRFS_BLOCK_GROUP_RAID1 |
9592                        BTRFS_BLOCK_GROUP_RAID5 |
9593                        BTRFS_BLOCK_GROUP_RAID6 |
9594                        BTRFS_BLOCK_GROUP_DUP)))
9595                         continue;
9596                 /*
9597                  * Avoid allocating from un-mirrored block groups if there are
9598                  * mirrored block groups.
9599                  */
9600                 list_for_each_entry(cache,
9601                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9602                                 list)
9603                         inc_block_group_ro(cache, 1);
9604                 list_for_each_entry(cache,
9605                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9606                                 list)
9607                         inc_block_group_ro(cache, 1);
9608         }
9609
9610         init_global_block_rsv(info);
9611         ret = 0;
9612 error:
9613         btrfs_free_path(path);
9614         return ret;
9615 }
9616
9617 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9618                                        struct btrfs_root *root)
9619 {
9620         struct btrfs_block_group_cache *block_group, *tmp;
9621         struct btrfs_root *extent_root = root->fs_info->extent_root;
9622         struct btrfs_block_group_item item;
9623         struct btrfs_key key;
9624         int ret = 0;
9625         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9626
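             /*
              * Clear can_flush_pending_bgs so the insertions below can't
              * recursively flush the pending block group list we are iterating.
              */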
9627         trans->can_flush_pending_bgs = false;
9628         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9629                 if (ret)
9630                         goto next;
9631
9632                 spin_lock(&block_group->lock);
9633                 memcpy(&item, &block_group->item, sizeof(item));
9634                 memcpy(&key, &block_group->key, sizeof(key));
9635                 spin_unlock(&block_group->lock);
9636
9637                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9638                                         sizeof(item));
9639                 if (ret)
9640                         btrfs_abort_transaction(trans, extent_root, ret);
9641                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9642                                                key.objectid, key.offset);
9643                 if (ret)
9644                         btrfs_abort_transaction(trans, extent_root, ret);
9645 next:
9646                 list_del_init(&block_group->bg_list);
9647         }
9648         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9649 }
9650
9651 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9652                            struct btrfs_root *root, u64 bytes_used,
9653                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9654                            u64 size)
9655 {
9656         int ret;
9657         struct btrfs_root *extent_root;
9658         struct btrfs_block_group_cache *cache;
9659
9660         extent_root = root->fs_info->extent_root;
9661
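             /*
              * A new block group can't be replayed from the tree log, so make
              * any fsync in this transaction fall back to a full commit.
              */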
9662         btrfs_set_log_full_commit(root->fs_info, trans);
9663
9664         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9665         if (!cache)
9666                 return -ENOMEM;
9667
9668         btrfs_set_block_group_used(&cache->item, bytes_used);
9669         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9670         btrfs_set_block_group_flags(&cache->item, type);
9671
9672         cache->flags = type;
9673         cache->last_byte_to_unpin = (u64)-1;
9674         cache->cached = BTRFS_CACHE_FINISHED;
9675         ret = exclude_super_stripes(root, cache);
9676         if (ret) {
9677                 /*
9678                  * We may have excluded something, so call this just in
9679                  * case.
9680                  */
9681                 free_excluded_extents(root, cache);
9682                 btrfs_put_block_group(cache);
9683                 return ret;
9684         }
9685
9686         add_new_free_space(cache, root->fs_info, chunk_offset,
9687                            chunk_offset + size);
9688
9689         free_excluded_extents(root, cache);
9690
9691 #ifdef CONFIG_BTRFS_DEBUG
9692         if (btrfs_should_fragment_free_space(root, cache)) {
9693                 u64 new_bytes_used = size - bytes_used;
9694
9695                 bytes_used += new_bytes_used >> 1;
9696                 fragment_free_space(root, cache);
9697         }
9698 #endif
9699         /*
9700          * Call to ensure the corresponding space_info object is created and
9701          * assigned to our block group, but don't update its counters just yet.
9702          * We want our bg to be added to the rbtree with its ->space_info set.
9703          */
9704         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9705                                 &cache->space_info);
9706         if (ret) {
9707                 btrfs_remove_free_space_cache(cache);
9708                 btrfs_put_block_group(cache);
9709                 return ret;
9710         }
9711
9712         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9713         if (ret) {
9714                 btrfs_remove_free_space_cache(cache);
9715                 btrfs_put_block_group(cache);
9716                 return ret;
9717         }
9718
9719         /*
9720          * Now that our block group has its ->space_info set and is inserted in
9721          * the rbtree, update the space info's counters.
9722          */
9723         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9724                                 &cache->space_info);
9725         if (ret) {
9726                 btrfs_remove_free_space_cache(cache);
9727                 spin_lock(&root->fs_info->block_group_cache_lock);
9728                 rb_erase(&cache->cache_node,
9729                          &root->fs_info->block_group_cache_tree);
9730                 RB_CLEAR_NODE(&cache->cache_node);
9731                 spin_unlock(&root->fs_info->block_group_cache_lock);
9732                 btrfs_put_block_group(cache);
9733                 return ret;
9734         }
9735         update_global_block_rsv(root->fs_info);
9736
9737         spin_lock(&cache->space_info->lock);
9738         cache->space_info->bytes_readonly += cache->bytes_super;
9739         spin_unlock(&cache->space_info->lock);
9740
9741         __link_block_group(cache->space_info, cache);
9742
9743         list_add_tail(&cache->bg_list, &trans->new_bgs);
9744
9745         set_avail_alloc_bits(extent_root->fs_info, type);
9746
9747         return 0;
9748 }
9749
9750 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9751 {
9752         u64 extra_flags = chunk_to_extended(flags) &
9753                                 BTRFS_EXTENDED_PROFILE_MASK;
9754
9755         write_seqlock(&fs_info->profiles_lock);
9756         if (flags & BTRFS_BLOCK_GROUP_DATA)
9757                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9758         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9759                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9760         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9761                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9762         write_sequnlock(&fs_info->profiles_lock);
9763 }
9764
9765 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9766                              struct btrfs_root *root, u64 group_start,
9767                              struct extent_map *em)
9768 {
9769         struct btrfs_path *path;
9770         struct btrfs_block_group_cache *block_group;
9771         struct btrfs_free_cluster *cluster;
9772         struct btrfs_root *tree_root = root->fs_info->tree_root;
9773         struct btrfs_key key;
9774         struct inode *inode;
9775         struct kobject *kobj = NULL;
9776         int ret;
9777         int index;
9778         int factor;
9779         struct btrfs_caching_control *caching_ctl = NULL;
9780         bool remove_em;
9781
9782         root = root->fs_info->extent_root;
9783
9784         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9785         BUG_ON(!block_group);
9786         BUG_ON(!block_group->ro);
9787
9788         /*
9789          * Free the reserved super bytes from this block group before
9790          * removing it.
9791          */
9792         free_excluded_extents(root, block_group);
9793
9794         memcpy(&key, &block_group->key, sizeof(key));
9795         index = get_block_group_index(block_group);
9796         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9797                                   BTRFS_BLOCK_GROUP_RAID1 |
9798                                   BTRFS_BLOCK_GROUP_RAID10))
9799                 factor = 2;
9800         else
9801                 factor = 1;
9802
9803         /* make sure this block group isn't part of an allocation cluster */
9804         cluster = &root->fs_info->data_alloc_cluster;
9805         spin_lock(&cluster->refill_lock);
9806         btrfs_return_cluster_to_free_space(block_group, cluster);
9807         spin_unlock(&cluster->refill_lock);
9808
9809         /*
9810          * make sure this block group isn't part of a metadata
9811          * allocation cluster
9812          */
9813         cluster = &root->fs_info->meta_alloc_cluster;
9814         spin_lock(&cluster->refill_lock);
9815         btrfs_return_cluster_to_free_space(block_group, cluster);
9816         spin_unlock(&cluster->refill_lock);
9817
9818         path = btrfs_alloc_path();
9819         if (!path) {
9820                 ret = -ENOMEM;
9821                 goto out;
9822         }
9823
9824         /*
9825          * get the inode first so any iput calls done for the io_list
9826          * aren't the final iput (no unlinks allowed now)
9827          */
9828         inode = lookup_free_space_inode(tree_root, block_group, path);
9829
9830         mutex_lock(&trans->transaction->cache_write_mutex);
9831         /*
9832          * make sure our free space cache IO is done before removing the
9833          * free space inode
9834          */
9835         spin_lock(&trans->transaction->dirty_bgs_lock);
9836         if (!list_empty(&block_group->io_list)) {
9837                 list_del_init(&block_group->io_list);
9838
9839                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9840
9841                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9842                 btrfs_wait_cache_io(root, trans, block_group,
9843                                     &block_group->io_ctl, path,
9844                                     block_group->key.objectid);
9845                 btrfs_put_block_group(block_group);
9846                 spin_lock(&trans->transaction->dirty_bgs_lock);
9847         }
9848
9849         if (!list_empty(&block_group->dirty_list)) {
9850                 list_del_init(&block_group->dirty_list);
9851                 btrfs_put_block_group(block_group);
9852         }
9853         spin_unlock(&trans->transaction->dirty_bgs_lock);
9854         mutex_unlock(&trans->transaction->cache_write_mutex);
9855
9856         if (!IS_ERR(inode)) {
9857                 ret = btrfs_orphan_add(trans, inode);
9858                 if (ret) {
9859                         btrfs_add_delayed_iput(inode);
9860                         goto out;
9861                 }
9862                 clear_nlink(inode);
9863                 /* One for the block group's ref */
9864                 spin_lock(&block_group->lock);
9865                 if (block_group->iref) {
9866                         block_group->iref = 0;
9867                         block_group->inode = NULL;
9868                         spin_unlock(&block_group->lock);
9869                         iput(inode);
9870                 } else {
9871                         spin_unlock(&block_group->lock);
9872                 }
9873                 /* One for our lookup ref */
9874                 btrfs_add_delayed_iput(inode);
9875         }
9876
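             /*
              * Delete the free space cache item for this block group from the
              * tree root, if one exists.
              */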
9877         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9878         key.offset = block_group->key.objectid;
9879         key.type = 0;
9880
9881         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9882         if (ret < 0)
9883                 goto out;
9884         if (ret > 0)
9885                 btrfs_release_path(path);
9886         if (ret == 0) {
9887                 ret = btrfs_del_item(trans, tree_root, path);
9888                 if (ret)
9889                         goto out;
9890                 btrfs_release_path(path);
9891         }
9892
9893         spin_lock(&root->fs_info->block_group_cache_lock);
9894         rb_erase(&block_group->cache_node,
9895                  &root->fs_info->block_group_cache_tree);
9896         RB_CLEAR_NODE(&block_group->cache_node);
9897
9898         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9899                 root->fs_info->first_logical_byte = (u64)-1;
9900         spin_unlock(&root->fs_info->block_group_cache_lock);
9901
9902         down_write(&block_group->space_info->groups_sem);
9903         /*
9904          * we must use list_del_init so people can check to see if they
9905          * are still on the list after taking the semaphore
9906          */
9907         list_del_init(&block_group->list);
9908         if (list_empty(&block_group->space_info->block_groups[index])) {
9909                 kobj = block_group->space_info->block_group_kobjs[index];
9910                 block_group->space_info->block_group_kobjs[index] = NULL;
9911                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9912         }
9913         up_write(&block_group->space_info->groups_sem);
9914         if (kobj) {
9915                 kobject_del(kobj);
9916                 kobject_put(kobj);
9917         }
9918
9919         if (block_group->has_caching_ctl)
9920                 caching_ctl = get_caching_control(block_group);
9921         if (block_group->cached == BTRFS_CACHE_STARTED)
9922                 wait_block_group_cache_done(block_group);
9923         if (block_group->has_caching_ctl) {
9924                 down_write(&root->fs_info->commit_root_sem);
9925                 if (!caching_ctl) {
9926                         struct btrfs_caching_control *ctl;
9927
9928                         list_for_each_entry(ctl,
9929                                     &root->fs_info->caching_block_groups, list)
9930                                 if (ctl->block_group == block_group) {
9931                                         caching_ctl = ctl;
9932                                         atomic_inc(&caching_ctl->count);
9933                                         break;
9934                                 }
9935                 }
9936                 if (caching_ctl)
9937                         list_del_init(&caching_ctl->list);
9938                 up_write(&root->fs_info->commit_root_sem);
9939                 if (caching_ctl) {
9940                         /* Once for the caching bgs list and once for us. */
9941                         put_caching_control(caching_ctl);
9942                         put_caching_control(caching_ctl);
9943                 }
9944         }
9945
9946         spin_lock(&trans->transaction->dirty_bgs_lock);
9947         WARN_ON(!list_empty(&block_group->dirty_list));
9948         WARN_ON(!list_empty(&block_group->io_list));
9953         spin_unlock(&trans->transaction->dirty_bgs_lock);
9954         btrfs_remove_free_space_cache(block_group);
9955
9956         spin_lock(&block_group->space_info->lock);
9957         list_del_init(&block_group->ro_list);
9958
9959         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9960                 WARN_ON(block_group->space_info->total_bytes
9961                         < block_group->key.offset);
9962                 WARN_ON(block_group->space_info->bytes_readonly
9963                         < block_group->key.offset);
9964                 WARN_ON(block_group->space_info->disk_total
9965                         < block_group->key.offset * factor);
9966         }
9967         block_group->space_info->total_bytes -= block_group->key.offset;
9968         block_group->space_info->bytes_readonly -= block_group->key.offset;
9969         block_group->space_info->disk_total -= block_group->key.offset * factor;
9970
9971         spin_unlock(&block_group->space_info->lock);
9972
9973         memcpy(&key, &block_group->key, sizeof(key));
9974
9975         lock_chunks(root);
9976         if (!list_empty(&em->list)) {
9977                 /* We're in the transaction->pending_chunks list. */
9978                 free_extent_map(em);
9979         }
9980         spin_lock(&block_group->lock);
9981         block_group->removed = 1;
9982         /*
9983          * At this point trimming can't start on this block group, because we
9984          * removed the block group from the tree fs_info->block_group_cache_tree
9985          * so no one can find it anymore, and even if someone already got this
9986          * block group before we removed it from the rbtree, they have already
9987          * incremented block_group->trimming - if they didn't, they won't find
9988          * any free space entries because we already removed them all when we
9989          * called btrfs_remove_free_space_cache().
9990          *
9991          * And we must not remove the extent map from the fs_info->mapping_tree
9992          * to prevent the same logical address range and physical device space
9993          * ranges from being reused for a new block group. This is because our
9994          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9995          * completely transactionless, so while it is trimming a range the
9996          * currently running transaction might finish and a new one start,
9997          * allowing for new block groups to be created that can reuse the same
9998          * physical device locations unless we take this special care.
9999          *
10000          * There may also be an implicit trim operation if the file system
10001          * is mounted with -odiscard. The same protections must remain
10002          * in place until the extents have been discarded completely when
10003          * the transaction commit has completed.
10004          */
10005         remove_em = (atomic_read(&block_group->trimming) == 0);
10006         /*
10007          * Make sure a trimmer task always sees the em in the pinned_chunks list
10008          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10009          * before checking block_group->removed).
10010          */
10011         if (!remove_em) {
10012                 /*
10013                  * Our em might be in trans->transaction->pending_chunks which
10014                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10015                  * and so is the fs_info->pinned_chunks list.
10016                  *
10017                  * So at this point we must be holding the chunk_mutex to avoid
10018                  * any races with chunk allocation (more specifically at
10019                  * volumes.c:contains_pending_extent()), to ensure it always
10020                  * sees the em, either in the pending_chunks list or in the
10021                  * pinned_chunks list.
10022                  */
10023                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10024         }
10025         spin_unlock(&block_group->lock);
10026
10027         if (remove_em) {
10028                 struct extent_map_tree *em_tree;
10029
10030                 em_tree = &root->fs_info->mapping_tree.map_tree;
10031                 write_lock(&em_tree->lock);
10032                 /*
10033                  * The em might be in the pending_chunks list, so make sure the
10034                  * chunk mutex is locked, since remove_extent_mapping() will
10035                  * delete us from that list.
10036                  */
10037                 remove_extent_mapping(em_tree, em);
10038                 write_unlock(&em_tree->lock);
10039                 /* once for the tree */
10040                 free_extent_map(em);
10041         }
10042
10043         unlock_chunks(root);
10044
10045         btrfs_put_block_group(block_group);
10046         btrfs_put_block_group(block_group);
10047
10048         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10049         if (ret > 0)
10050                 ret = -EIO;
10051         if (ret < 0)
10052                 goto out;
10053
10054         ret = btrfs_del_item(trans, root, path);
10055 out:
10056         btrfs_free_path(path);
10057         return ret;
10058 }
10059
10060 /*
10061  * Process the unused_bgs list and remove any block groups that no longer have
10062  * any allocated space inside them.
10063  */
10064 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10065 {
10066         struct btrfs_block_group_cache *block_group;
10067         struct btrfs_space_info *space_info;
10068         struct btrfs_root *root = fs_info->extent_root;
10069         struct btrfs_trans_handle *trans;
10070         int ret = 0;
10071
10072         if (!fs_info->open)
10073                 return;
10074
10075         spin_lock(&fs_info->unused_bgs_lock);
10076         while (!list_empty(&fs_info->unused_bgs)) {
10077                 u64 start, end;
10078                 int trimming;
10079
10080                 block_group = list_first_entry(&fs_info->unused_bgs,
10081                                                struct btrfs_block_group_cache,
10082                                                bg_list);
10083                 space_info = block_group->space_info;
10084                 list_del_init(&block_group->bg_list);
10085                 if (ret || btrfs_mixed_space_info(space_info)) {
10086                         btrfs_put_block_group(block_group);
10087                         continue;
10088                 }
10089                 spin_unlock(&fs_info->unused_bgs_lock);
10090
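                      /*
                       * Serialize against relocation, which also removes
                       * block groups.
                       */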
10091                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10092
10093                 /* Don't want to race with allocators so take the groups_sem */
10094                 down_write(&space_info->groups_sem);
10095                 spin_lock(&block_group->lock);
10096                 if (block_group->reserved ||
10097                     btrfs_block_group_used(&block_group->item) ||
10098                     block_group->ro) {
10099                         /*
10100                          * We want to bail if we made new allocations or have
10101                          * outstanding allocations in this block group.  We do
10102                          * the ro check in case balance is currently acting on
10103                          * this block group.
10104                          */
10105                         spin_unlock(&block_group->lock);
10106                         up_write(&space_info->groups_sem);
10107                         goto next;
10108                 }
10109                 spin_unlock(&block_group->lock);
10110
10111                 /* We don't want to force the issue, only flip if it's ok. */
10112                 ret = inc_block_group_ro(block_group, 0);
10113                 up_write(&space_info->groups_sem);
10114                 if (ret < 0) {
10115                         ret = 0;
10116                         goto next;
10117                 }
10118
10119                 /*
10120                  * Want to do this before we do anything else so we can recover
10121                  * properly if we fail to join the transaction.
10122                  */
10123                 /* 1 for btrfs_orphan_reserve_metadata() */
10124                 trans = btrfs_start_transaction(root, 1);
10125                 if (IS_ERR(trans)) {
10126                         btrfs_dec_block_group_ro(root, block_group);
10127                         ret = PTR_ERR(trans);
10128                         goto next;
10129                 }
10130
10131                 /*
10132                  * We could have pending pinned extents for this block group,
10133                  * just delete them, we don't care about them anymore.
10134                  */
10135                 start = block_group->key.objectid;
10136                 end = start + block_group->key.offset - 1;
10137                 /*
10138                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10139                  * btrfs_finish_extent_commit(). If we are at transaction N,
10140                  * another task might be running finish_extent_commit() for the
10141                  * previous transaction N - 1, and have seen a range belonging
10142                  * to the block group in freed_extents[] before we were able to
10143                  * clear the whole block group range from freed_extents[]. This
10144          * means that task can look up the block group after we
10145                  * unpinned it from freed_extents[] and removed it, leading to
10146                  * a BUG_ON() at btrfs_unpin_extent_range().
10147                  */
10148                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10149                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10150                                   EXTENT_DIRTY, GFP_NOFS);
10151                 if (ret) {
10152                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10153                         btrfs_dec_block_group_ro(root, block_group);
10154                         goto end_trans;
10155                 }
10156                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10157                                   EXTENT_DIRTY, GFP_NOFS);
10158                 if (ret) {
10159                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10160                         btrfs_dec_block_group_ro(root, block_group);
10161                         goto end_trans;
10162                 }
10163                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10164
10165                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10166                 spin_lock(&space_info->lock);
10167                 spin_lock(&block_group->lock);
10168
10169                 space_info->bytes_pinned -= block_group->pinned;
10170                 space_info->bytes_readonly += block_group->pinned;
10171                 percpu_counter_add(&space_info->total_bytes_pinned,
10172                                    -block_group->pinned);
10173                 block_group->pinned = 0;
10174
10175                 spin_unlock(&block_group->lock);
10176                 spin_unlock(&space_info->lock);
10177
10178                 /* DISCARD can flip during remount */
10179                 trimming = btrfs_test_opt(root, DISCARD);
10180
10181                 /* Implicit trim during transaction commit. */
10182                 if (trimming)
10183                         btrfs_get_block_group_trimming(block_group);
10184
10185                 /*
10186                  * btrfs_remove_chunk() will abort the transaction if things go
10187                  * horribly wrong.
10188                  */
10189                 ret = btrfs_remove_chunk(trans, root,
10190                                          block_group->key.objectid);
10191
10192                 if (ret) {
10193                         if (trimming)
10194                                 btrfs_put_block_group_trimming(block_group);
10195                         goto end_trans;
10196                 }
10197
10198                 /*
10199                  * If we're not mounted with -odiscard, we can just forget
10200                  * about this block group. Otherwise we'll need to wait
10201                  * until transaction commit to do the actual discard.
10202                  */
10203                 if (trimming) {
10204                         WARN_ON(!list_empty(&block_group->bg_list));
10205                         spin_lock(&trans->transaction->deleted_bgs_lock);
10206                         list_move(&block_group->bg_list,
10207                                   &trans->transaction->deleted_bgs);
10208                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10209                         btrfs_get_block_group(block_group);
10210                 }
10211 end_trans:
10212                 btrfs_end_transaction(trans, root);
10213 next:
10214                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10215                 btrfs_put_block_group(block_group);
10216                 spin_lock(&fs_info->unused_bgs_lock);
10217         }
10218         spin_unlock(&fs_info->unused_bgs_lock);
10219 }
10220
10221 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10222 {
10223         struct btrfs_space_info *space_info;
10224         struct btrfs_super_block *disk_super;
10225         u64 features;
10226         u64 flags;
10227         int mixed = 0;
10228         int ret;
10229
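              /*
               * Create empty space_info structs for the system, metadata and
               * data profiles (or mixed metadata+data if the filesystem uses
               * mixed block groups).
               */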
10230         disk_super = fs_info->super_copy;
10231         if (!btrfs_super_root(disk_super))
10232                 return 1;
10233
10234         features = btrfs_super_incompat_flags(disk_super);
10235         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10236                 mixed = 1;
10237
10238         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10239         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10240         if (ret)
10241                 goto out;
10242
10243         if (mixed) {
10244                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10245                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10246         } else {
10247                 flags = BTRFS_BLOCK_GROUP_METADATA;
10248                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10249                 if (ret)
10250                         goto out;
10251
10252                 flags = BTRFS_BLOCK_GROUP_DATA;
10253                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10254         }
10255 out:
10256         return ret;
10257 }
10258
10259 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10260 {
10261         return unpin_extent_range(root, start, end, false);
10262 }
10263
10264 /*
10265  * It used to be that old block groups would be left around forever.
10266  * Iterating over them would be enough to trim unused space.  Since we
10267  * now automatically remove them, we also need to iterate over unallocated
10268  * space.
10269  *
10270  * We don't want a transaction for this since the discard may take a
10271  * substantial amount of time.  We don't require that a transaction be
10272  * running, but we do need to take a running transaction into account
10273  * to ensure that we're not discarding chunks that were released in
10274  * the current transaction.
10275  *
10276  * Holding the chunks lock will prevent other threads from allocating
10277  * or releasing chunks, but it won't prevent a running transaction
10278  * from committing and releasing the memory that the pending chunks
10279  * list head uses.  For that, we need to take a reference to the
10280  * transaction.
10281  */
10282 static int btrfs_trim_free_extents(struct btrfs_device *device,
10283                                    u64 minlen, u64 *trimmed)
10284 {
10285         u64 start = 0, len = 0;
10286         int ret;
10287
10288         *trimmed = 0;
10289
10290         /* Not writeable = nothing to do. */
10291         if (!device->writeable)
10292                 return 0;
10293
10294         /* No free space = nothing to do. */
10295         if (device->total_bytes <= device->bytes_used)
10296                 return 0;
10297
10298         ret = 0;
10299
10300         while (1) {
10301                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10302                 struct btrfs_transaction *trans;
10303                 u64 bytes;
10304
10305                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10306                 if (ret)
10307                         return ret;
10308
10309                 down_read(&fs_info->commit_root_sem);
10310
10311                 spin_lock(&fs_info->trans_lock);
10312                 trans = fs_info->running_transaction;
10313                 if (trans)
10314                         atomic_inc(&trans->use_count);
10315                 spin_unlock(&fs_info->trans_lock);
10316
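                      /*
                       * Find the next hole on this device of at least minlen
                       * bytes, taking the running transaction's pending chunk
                       * allocations into account.
                       */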
10317                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10318                                                  &start, &len);
10319                 if (trans)
10320                         btrfs_put_transaction(trans);
10321
10322                 if (ret) {
10323                         up_read(&fs_info->commit_root_sem);
10324                         mutex_unlock(&fs_info->chunk_mutex);
10325                         if (ret == -ENOSPC)
10326                                 ret = 0;
10327                         break;
10328                 }
10329
10330                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10331                 up_read(&fs_info->commit_root_sem);
10332                 mutex_unlock(&fs_info->chunk_mutex);
10333
10334                 if (ret)
10335                         break;
10336
10337                 start += len;
10338                 *trimmed += bytes;
10339
10340                 if (fatal_signal_pending(current)) {
10341                         ret = -ERESTARTSYS;
10342                         break;
10343                 }
10344
10345                 cond_resched();
10346         }
10347
10348         return ret;
10349 }
10350
10351 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10352 {
10353         struct btrfs_fs_info *fs_info = root->fs_info;
10354         struct btrfs_block_group_cache *cache = NULL;
10355         struct btrfs_device *device;
10356         struct list_head *devices;
10357         u64 group_trimmed;
10358         u64 start;
10359         u64 end;
10360         u64 trimmed = 0;
10361         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10362         int ret = 0;
10363
10364         /*
10365          * Try to trim all FS space; our first block group may start at a non-zero offset.
10366          */
10367         if (range->len == total_bytes)
10368                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10369         else
10370                 cache = btrfs_lookup_block_group(fs_info, range->start);
10371
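              /*
               * Walk all block groups overlapping the requested range and trim
               * their free space.
               */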
10372         while (cache) {
10373                 if (cache->key.objectid >= (range->start + range->len)) {
10374                         btrfs_put_block_group(cache);
10375                         break;
10376                 }
10377
10378                 start = max(range->start, cache->key.objectid);
10379                 end = min(range->start + range->len,
10380                                 cache->key.objectid + cache->key.offset);
10381
10382                 if (end - start >= range->minlen) {
10383                         if (!block_group_cache_done(cache)) {
10384                                 ret = cache_block_group(cache, 0);
10385                                 if (ret) {
10386                                         btrfs_put_block_group(cache);
10387                                         break;
10388                                 }
10389                                 ret = wait_block_group_cache_done(cache);
10390                                 if (ret) {
10391                                         btrfs_put_block_group(cache);
10392                                         break;
10393                                 }
10394                         }
10395                         ret = btrfs_trim_block_group(cache,
10396                                                      &group_trimmed,
10397                                                      start,
10398                                                      end,
10399                                                      range->minlen);
10400
10401                         trimmed += group_trimmed;
10402                         if (ret) {
10403                                 btrfs_put_block_group(cache);
10404                                 break;
10405                         }
10406                 }
10407
10408                 cache = next_block_group(fs_info->tree_root, cache);
10409         }
10410
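              /* Also trim the unallocated space on each allocatable device. */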
10411         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10412         devices = &root->fs_info->fs_devices->alloc_list;
10413         list_for_each_entry(device, devices, dev_alloc_list) {
10414                 ret = btrfs_trim_free_extents(device, range->minlen,
10415                                               &group_trimmed);
10416                 if (ret)
10417                         break;
10418
10419                 trimmed += group_trimmed;
10420         }
10421         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10422
10423         range->len = trimmed;
10424         return ret;
10425 }
10426
10427 /*
10428  * btrfs_{start,end}_write_no_snapshoting() are similar to
10429  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
10430  * data into the page cache through nocow before the subvolume is snapshotted,
10431  * while still flushing that data to disk after the snapshot creation, and to
10432  * prevent operations while snapshotting is ongoing that would make the
10433  * snapshot inconsistent (writes followed by expanding truncates, for example).
10434  */
10435 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10436 {
10437         percpu_counter_dec(&root->subv_writers->counter);
10438         /*
10439          * Make sure counter is updated before we wake up waiters.
10440          */
10441         smp_mb();
10442         if (waitqueue_active(&root->subv_writers->wait))
10443                 wake_up(&root->subv_writers->wait);
10444 }
10445
10446 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10447 {
10448         if (atomic_read(&root->will_be_snapshoted))
10449                 return 0;
10450
10451         percpu_counter_inc(&root->subv_writers->counter);
10452         /*
10453          * Make sure counter is updated before we check for snapshot creation.
10454          */
10455         smp_mb();
10456         if (atomic_read(&root->will_be_snapshoted)) {
10457                 btrfs_end_write_no_snapshoting(root);
10458                 return 0;
10459         }
10460         return 1;
10461 }