/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

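/*
 * The superblock and its mirrors live inside block groups, so their
 * stripes must be carved out of the free space before a group is cached.
 * As an illustration (assuming the standard layout from btrfs_sb_offset()),
 * the primary copy sits at 64K and the mirror copies at 64M and 256G; a
 * block group covering one of those byte ranges gets that stripe excluded
 * below and accounted in bytes_super.
 */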
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will be released as soon as the
 * transaction commits.
 */
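/*
 * As a worked example (made-up, half-open ranges): caching [0, 100M)
 * while pinned extents cover [20M, 30M) and [50M, 60M) adds [0, 20M),
 * [30M, 50M) and [60M, 100M) as free space now; the two pinned ranges
 * become usable only after the transaction commits.
 */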
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_readonly += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			/* wake up anyone waiting once 2MB has been found */
			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}

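/*
 * To summarize, there are three ways out of cache_block_group() below:
 * the free space cache written at the last commit is loaded straight off
 * disk (the fast path), a caching kthread is started to rebuild the cache
 * by scanning the extent tree, or, when load_cache_only is set, we return
 * after the fast-path attempt without kicking off the kthread.
 */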
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we
	 * need to have the normal tree locking.  Also, if we are currently
	 * trying to allocate blocks for the tree root we can't do the fast
	 * caching since we likely hold important locks.
	 */
	if (!trans->transaction->in_commit &&
	    (root && root != root->fs_info->tree_root)) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1)
			return 0;
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

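/*
 * In other words, factor is in tenths for div_factor() and in percent for
 * div_factor_fine(): div_factor(1024, 9) == 921 and
 * div_factor_fine(1024, 90) == 921, both roughly 90% of 1024.
 */
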
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are run.
 */
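/*
 * For instance (made-up numbers): if the extent item on disk records 2
 * references and the delayed ref head is carrying ref_mod == +1 that has
 * not been run yet, this helper reports 3, the count the extent tree
 * will show after the delayed refs are processed.
 */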
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree search.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic, and
 * can be used in all cases the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead.  Every time a tree
 * block gets COWed, we have to update back refs entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, an implicit back ref is used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
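/*
 * To make the layout concrete, a sketch with made-up numbers: a file
 * extent at bytenr 136M used once by inode 257 at file offset 0 in the
 * tree of root 5 gets an implicit back ref
 *
 *     (136M, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0)) with count 1
 *
 * while the same extent pointed to from a shared/relocated leaf at
 * bytenr 30M would instead carry
 *
 *     (136M, BTRFS_SHARED_DATA_REF_KEY, 30M)
 *
 * Tree blocks follow the same pattern with BTRFS_TREE_BLOCK_REF_KEY and
 * BTRFS_SHARED_BLOCK_REF_KEY.
 */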

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
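
/*
 * The 64-bit key offset for an implicit data back ref is derived from the
 * ref itself: crc32c of the root objectid goes into the upper bits and is
 * XORed with crc32c of (owner, offset) in the lower bits.  The hash
 * depends only on those three fields, so the same (root, inode, offset)
 * triple always lands on the same key offset, and lookup_extent_data_ref()
 * below can recompute it to find the item again.
 */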

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		/*
		 * -EEXIST with a non-matching ref means a hash collision:
		 * step the key offset forward until we find our ref or a
		 * free slot.
		 */
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

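/*
 * In table form, the mapping above is:
 *
 *   owner < BTRFS_FIRST_FREE_OBJECTID (tree block):
 *       parent set -> BTRFS_SHARED_BLOCK_REF_KEY
 *       no parent  -> BTRFS_TREE_BLOCK_REF_KEY
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (data extent):
 *       parent set -> BTRFS_SHARED_DATA_REF_KEY
 *       no parent  -> BTRFS_EXTENT_DATA_REF_KEY
 */
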
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
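/*
 * A typical caller therefore looks roughly like this sketch (see
 * insert_inline_extent_backref() below for the real thing):
 *
 *	ret = lookup_inline_extent_backref(..., &iref, ..., 1);
 *	if (ret == 0)
 *		update_inline_extent_backref(...);  // ref exists, bump count
 *	else if (ret == -ENOENT)
 *		setup_inline_extent_backref(...);   // insert at *ref_ret
 *
 * and -EAGAIN makes the caller fall back to a separate, non-inline back
 * ref item, as __btrfs_inc_extent_ref() does.
 */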
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

5d4f98a2
YZ
1746static void btrfs_issue_discard(struct block_device *bdev,
1747 u64 start, u64 len)
1748{
746cd1e7 1749 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
fbd9b09a 1750 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
5d4f98a2 1751}
5d4f98a2
YZ
1752
1753static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1754 u64 num_bytes)
1755{
5d4f98a2
YZ
1756 int ret;
1757 u64 map_length = num_bytes;
1758 struct btrfs_multi_bio *multi = NULL;
1759
e244a0ae
CH
1760 if (!btrfs_test_opt(root, DISCARD))
1761 return 0;
1762
5d4f98a2
YZ
1763 /* Tell the block device(s) that the sectors can be discarded */
1764 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1765 bytenr, &map_length, &multi, 0);
1766 if (!ret) {
1767 struct btrfs_bio_stripe *stripe = multi->stripes;
1768 int i;
1769
1770 if (map_length > num_bytes)
1771 map_length = num_bytes;
1772
1773 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1774 btrfs_issue_discard(stripe->dev->bdev,
1775 stripe->physical,
1776 map_length);
1777 }
1778 kfree(multi);
1779 }
1780
1781 return ret;
5d4f98a2
YZ
1782}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
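
/*
 * Editorial note: owner values below BTRFS_FIRST_FREE_OBJECTID are tree
 * block levels, so owner doubles as the discriminator between tree
 * block refs and file data refs.  Either way, nothing touches the
 * extent tree here; a delayed ref is queued and the real update happens
 * later in btrfs_run_delayed_refs().
 */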

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will set up the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
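
/*
 * Editorial example of why ADD refs run first: suppose an extent with a
 * single reference queues a DROP (from one tree) and an ADD (from
 * another) in the same batch.  Running the DROP first would take the
 * ref count 1 -> 0 and free the extent even though the ADD would bring
 * it right back; selecting the ADD first keeps the count at 2 -> 1 and
 * the extent stays alive throughout.
 */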

static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, so go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
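
/*
 * Editorial usage sketch for the count argument (the real callers live
 * elsewhere in the tree, so this is illustrative only):
 *
 *	drain everything, including refs added while we run:
 *		btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 *	process roughly what was queued when we started:
 *		btrfs_run_delayed_refs(trans, root, 0);
 *
 *	bound the work to about 64 ref updates:
 *		btrfs_run_delayed_refs(trans, root, 64);
 */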

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
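
/*
 * Editorial example: a caller that wants to mark a tree block's extent
 * as FULL_BACKREF without touching its key could do (hypothetical
 * values):
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, eb->start, eb->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 *
 * The flag change is queued as a delayed extent op and applied to the
 * on-disk extent item by run_delayed_extent_op() above.
 */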

static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}

#if 0
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}

/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
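
/*
 * Editorial sketch of how the comparator would be used with the kernel
 * sort() helper from linux/sort.h (this whole region is compiled out,
 * so the buffer name is hypothetical):
 *
 *	struct refsort *sorted;
 *
 *	sorted = kmalloc(nritems * sizeof(*sorted), GFP_NOFS);
 *	... fill in sorted[i].bytenr and sorted[i].slot ...
 *	sort(sorted, nritems, sizeof(struct refsort), refsort_cmp, NULL);
 */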
#endif

static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}

static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	if (ret)
		return ret;
	return 0;
}
2723
4a8c9a62
YZ
2724static struct btrfs_block_group_cache *
2725next_block_group(struct btrfs_root *root,
2726 struct btrfs_block_group_cache *cache)
2727{
2728 struct rb_node *node;
2729 spin_lock(&root->fs_info->block_group_cache_lock);
2730 node = rb_next(&cache->cache_node);
2731 btrfs_put_block_group(cache);
2732 if (node) {
2733 cache = rb_entry(node, struct btrfs_block_group_cache,
2734 cache_node);
11dfe35a 2735 btrfs_get_block_group(cache);
4a8c9a62
YZ
2736 } else
2737 cache = NULL;
2738 spin_unlock(&root->fs_info->block_group_cache_lock);
2739 return cache;
2740}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(root, path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path,
						      inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED) {
		/* We're not cached, don't bother trying to write stuff out */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	/*
	 * Just to make absolutely sure we have enough space, we preallocate
	 * 16 pages worth of space for each gigabyte of the block group.  In
	 * practice we ought to need far fewer, but the extra space lets us
	 * add our header and have a terminator between the extents and the
	 * bitmaps.
	 */
	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);
out_put:
	iput(inode);
out_free:
	btrfs_release_path(root, path);
out:
	spin_lock(&block_group->lock);
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}
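
/*
 * Editorial arithmetic for the preallocation above, assuming 4KiB
 * pages: a 100GiB block group gives num_pages = 100, and then
 * 100 * 16 * 4096 bytes = 6.25MiB is reserved for its free space cache;
 * anything under 1GiB is rounded up to one gigabyte's worth (64KiB).
 */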

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but it can't hurt, just
		 * in case.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
				BTRFS_BLOCK_GROUP_SYSTEM |
				BTRFS_BLOCK_GROUP_METADATA);
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	atomic_set(&found->caching_threads, 0);
	return 0;
}
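
/*
 * Editorial example for the factor above: DUP, RAID1 and RAID10 store
 * two copies of every byte, so accounting 1GiB of used space in a RAID1
 * space_info adds 1GiB to bytes_used but 2GiB to disk_used; single and
 * RAID0 profiles use factor 1, so the two counters move together.
 */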

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
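
/*
 * Editorial example of the reduction above: on a two-device filesystem
 * asked for RAID10 | RAID1 | RAID0, num_devices < 4 strips RAID10, and
 * the RAID1-over-RAID0 preference then strips RAID0, leaving plain
 * RAID1.  On a single device the same request collapses all the way to
 * 0, i.e. the default single profile.
 */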

static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits &
			 root->fs_info->data_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits &
			 root->fs_info->system_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits &
			 root->fs_info->metadata_alloc_profile;
	return btrfs_reduce_alloc_profile(root, flags);
}

static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}

void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}

/*
 * This will check the space info that the inode allocates from to make
 * sure we have enough space for the requested number of bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	if (root == root->fs_info->tree_root) {
		alloc_chunk = 0;
		committed = 1;
	}

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we
		 * need to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = 1;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target, 0);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed && !root->fs_info->open_ioctl_trans) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

#if 0 /* I hope we never need this code again, just in case */
		printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
		       "%llu bytes_reserved, %llu bytes_pinned, "
		       "%llu bytes_readonly, %llu may use %llu total\n",
		       (unsigned long long)bytes,
		       (unsigned long long)data_sinfo->bytes_used,
		       (unsigned long long)data_sinfo->bytes_reserved,
		       (unsigned long long)data_sinfo->bytes_pinned,
		       (unsigned long long)data_sinfo->bytes_readonly,
		       (unsigned long long)data_sinfo->bytes_may_use,
		       (unsigned long long)data_sinfo->total_bytes);
#endif
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * called when we are clearing a delalloc extent from the
 * inode's io_tree or there was an error for whatever reason
 * after calling btrfs_check_data_free_space
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}
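
/*
 * Editorial sketch of the expected reserve/release pairing (the real
 * callers live in the write and ioctl paths; do_data_write() is a
 * hypothetical stand-in):
 *
 *	ret = btrfs_check_data_free_space(inode, bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_data_write(inode, bytes);
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, bytes);
 *
 * Both functions round bytes up to a sectorsize multiple, so the two
 * calls must use the same byte count; e.g. with a 4096-byte sectorsize
 * a 5000-byte request reserves (and later releases) 8192 bytes.
 */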

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
	}
	rcu_read_unlock();
}

static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, u64 alloc_bytes)
{
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 thresh;

	if (sinfo->bytes_used + sinfo->bytes_reserved +
	    alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;

	if (sinfo->bytes_used + sinfo->bytes_reserved +
	    alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));

	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
		return 0;

	return 1;
}
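
/*
 * Editorial worked example (assuming div_factor(n, f) is roughly
 * n * f / 10 and div_factor_fine(n, f) roughly n * f / 100): with a
 * 10GiB writable space_info and 4GiB used+reserved, a 1MiB allocation
 * still leaves well over 256MiB of headroom, so the first check returns
 * 0 and no new chunk is allocated.  Only once usage crowds the existing
 * chunks (past the 80% mark, 8GiB here) do we fall through toward
 * returning 1.
 */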

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = 1;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	if (!force && !should_alloc_chunk(extent_root, space_info,
					  alloc_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	else
		ret = 1;
	space_info->force_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}

/*
 * shrink metadata reservation for delalloc
 */
static int shrink_delalloc(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 to_reclaim, int sync)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	u64 reserved;
	u64 max_reclaim;
	u64 reclaimed = 0;
	int pause = 1;
	int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;

	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	smp_mb();
	reserved = space_info->bytes_reserved;

	if (reserved == 0)
		return 0;

	max_reclaim = min(reserved, to_reclaim);

	while (1) {
		/* have the flusher threads jump in and do some IO */
		smp_mb();
		nr_pages = min_t(unsigned long, nr_pages,
		       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);

		spin_lock(&space_info->lock);
		if (reserved > space_info->bytes_reserved)
			reclaimed += reserved - space_info->bytes_reserved;
		reserved = space_info->bytes_reserved;
		spin_unlock(&space_info->lock);

		if (reserved == 0 || reclaimed >= max_reclaim)
			break;

		if (trans && trans->transaction->blocked)
			return -EAGAIN;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(pause);
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}
	return reclaimed >= to_reclaim;
}
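
/*
 * Editorial note on the loop above: the sleep between writeback kicks
 * backs off exponentially (1, 2, 4, ... jiffies) but is capped at
 * HZ / 10, i.e. 100ms, so a stalled reclaim still re-checks the
 * space_info at least ten times a second until enough reserved bytes
 * are freed or the transaction blocks.
 */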

/*
 * Retries tells us how many times we've called reserve_metadata_bytes.  The
 * idea is if this is the first call (retries == 0) then we will add to our
 * reserved count if we can't make the allocation in order to hold our place
 * while we go and try and free up space.  That way for retries > 1 we don't
 * try and add space, we just check to see if the amount of unused space is
 * >= the total space, meaning that our reservation is valid.
 *
 * However if we don't intend to retry this reservation, pass -1 as retries
 * so that it short circuits this logic.
 */
static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes, int flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 unused;
	u64 num_bytes = orig_bytes;
	int retries = 0;
	int ret = 0;
	bool reserved = false;
	bool committed = false;

again:
	ret = -ENOSPC;
	if (reserved)
		num_bytes = 0;

	spin_lock(&space_info->lock);
	unused = space_info->bytes_used + space_info->bytes_reserved +
		 space_info->bytes_pinned + space_info->bytes_readonly +
		 space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the
	 * space info then we can go ahead and save our reservation first
	 * and then start flushing if we need to.  Otherwise, if we've
	 * already overcommitted, let's start flushing stuff first and then
	 * come back and try to make our reservation.
	 */
	if (unused <= space_info->total_bytes) {
		unused = space_info->total_bytes - unused;
		if (unused >= num_bytes) {
			if (!reserved)
				space_info->bytes_reserved += orig_bytes;
			ret = 0;
		} else {
			/*
			 * Ok, set num_bytes to orig_bytes since we aren't
			 * overcommitted; this way we only try and reclaim
			 * what we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok, we're overcommitted; set num_bytes to the
		 * overcommitted amount plus the amount of bytes that we
		 * need for this reservation.
		 */
		num_bytes = unused - space_info->total_bytes +
			    (orig_bytes * (retries + 1));
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 */
	if (ret && !reserved) {
		space_info->bytes_reserved += orig_bytes;
		reserved = true;
	}

	spin_unlock(&space_info->lock);

	if (!ret)
		return 0;

	if (!flush)
		goto out;

	/*
	 * We do synchronous shrinking since we don't actually unreserve
	 * metadata until after the IO is completed.
	 */
	ret = shrink_delalloc(trans, root, num_bytes, 1);
	if (ret > 0)
		return 0;
	else if (ret < 0)
		goto out;

	/*
	 * So if we were overcommitted it's possible that somebody else flushed
	 * out enough space and we simply didn't have enough space to reclaim,
	 * so go back around and try again.
	 */
	if (retries < 2) {
		retries++;
		goto again;
	}

	spin_lock(&space_info->lock);
	/*
	 * Not enough space to be reclaimed, don't bother committing the
	 * transaction.
	 */
	if (space_info->bytes_pinned < orig_bytes)
		ret = -ENOSPC;
	spin_unlock(&space_info->lock);
	if (ret)
		goto out;

	ret = -EAGAIN;
	if (trans || committed)
		goto out;

	ret = -ENOSPC;
	trans = btrfs_join_transaction(root, 1);
	if (IS_ERR(trans))
		goto out;
	ret = btrfs_commit_transaction(trans, root);
	if (!ret) {
		trans = NULL;
		committed = true;
		goto again;
	}

out:
	if (reserved) {
		spin_lock(&space_info->lock);
		space_info->bytes_reserved -= orig_bytes;
		spin_unlock(&space_info->lock);
	}

	return ret;
}
3529
3530static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3531 struct btrfs_root *root)
3532{
3533 struct btrfs_block_rsv *block_rsv;
3534 if (root->ref_cows)
3535 block_rsv = trans->block_rsv;
3536 else
3537 block_rsv = root->block_rsv;
3538
3539 if (!block_rsv)
3540 block_rsv = &root->fs_info->empty_block_rsv;
3541
3542 return block_rsv;
3543}
3544
3545static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3546 u64 num_bytes)
3547{
3548 int ret = -ENOSPC;
3549 spin_lock(&block_rsv->lock);
3550 if (block_rsv->reserved >= num_bytes) {
3551 block_rsv->reserved -= num_bytes;
3552 if (block_rsv->reserved < block_rsv->size)
3553 block_rsv->full = 0;
3554 ret = 0;
3555 }
3556 spin_unlock(&block_rsv->lock);
3557 return ret;
3558}
3559
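/*
 * credit num_bytes to the reservation; with update_size the target
 * size grows along with it, otherwise the rsv is marked full once
 * reserved catches up with size.
 */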
3560static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3561 u64 num_bytes, int update_size)
3562{
3563 spin_lock(&block_rsv->lock);
3564 block_rsv->reserved += num_bytes;
3565 if (update_size)
3566 block_rsv->size += num_bytes;
3567 else if (block_rsv->reserved >= block_rsv->size)
3568 block_rsv->full = 1;
3569 spin_unlock(&block_rsv->lock);
3570}
3571
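/*
 * shrink the rsv's target size by num_bytes ((u64)-1 releases
 * everything); reserved bytes now above the new size flow into 'dest'
 * when one is given, or back to the owning space_info.
 */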
3572void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3573 struct btrfs_block_rsv *dest, u64 num_bytes)
3574{
3575 struct btrfs_space_info *space_info = block_rsv->space_info;
3576
3577 spin_lock(&block_rsv->lock);
3578 if (num_bytes == (u64)-1)
3579 num_bytes = block_rsv->size;
3580 block_rsv->size -= num_bytes;
3581 if (block_rsv->reserved >= block_rsv->size) {
3582 num_bytes = block_rsv->reserved - block_rsv->size;
3583 block_rsv->reserved = block_rsv->size;
3584 block_rsv->full = 1;
3585 } else {
3586 num_bytes = 0;
3587 }
3588 spin_unlock(&block_rsv->lock);
3589
3590 if (num_bytes > 0) {
3591 if (dest) {
3592 block_rsv_add_bytes(dest, num_bytes, 0);
3593 } else {
3594 spin_lock(&space_info->lock);
3595 space_info->bytes_reserved -= num_bytes;
3596 spin_unlock(&space_info->lock);
4e06bdd6 3597 }
9ed74f2d 3598 }
f0486c68 3599}
4e06bdd6 3600
f0486c68
YZ
3601static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3602 struct btrfs_block_rsv *dst, u64 num_bytes)
3603{
3604 int ret;
9ed74f2d 3605
f0486c68
YZ
3606 ret = block_rsv_use_bytes(src, num_bytes);
3607 if (ret)
3608 return ret;
9ed74f2d 3609
f0486c68 3610 block_rsv_add_bytes(dst, num_bytes, 1);
9ed74f2d
JB
3611 return 0;
3612}
3613
f0486c68 3614void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
9ed74f2d 3615{
f0486c68
YZ
3616 memset(rsv, 0, sizeof(*rsv));
3617 spin_lock_init(&rsv->lock);
3618 atomic_set(&rsv->usage, 1);
3619 rsv->priority = 6;
3620 INIT_LIST_HEAD(&rsv->list);
3621}
3622
3623struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3624{
3625 struct btrfs_block_rsv *block_rsv;
3626 struct btrfs_fs_info *fs_info = root->fs_info;
9ed74f2d 3627
f0486c68
YZ
3628 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3629 if (!block_rsv)
3630 return NULL;
9ed74f2d 3631
f0486c68 3632 btrfs_init_block_rsv(block_rsv);
f0486c68
YZ
3633 block_rsv->space_info = __find_space_info(fs_info,
3634 BTRFS_BLOCK_GROUP_METADATA);
f0486c68
YZ
3635 return block_rsv;
3636}
9ed74f2d 3637
f0486c68
YZ
3638void btrfs_free_block_rsv(struct btrfs_root *root,
3639 struct btrfs_block_rsv *rsv)
3640{
3641 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3642 btrfs_block_rsv_release(root, rsv, (u64)-1);
3643 if (!rsv->durable)
3644 kfree(rsv);
3645 }
9ed74f2d
JB
3646}
3647
3648/*
f0486c68
YZ
3649 * make the block_rsv struct able to capture freed space.
3650 * the captured space will be re-added to the block_rsv struct
3651 * after transaction commit
9ed74f2d 3652 */
f0486c68
YZ
3653void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3654 struct btrfs_block_rsv *block_rsv)
9ed74f2d 3655{
f0486c68
YZ
3656 block_rsv->durable = 1;
3657 mutex_lock(&fs_info->durable_block_rsv_mutex);
3658 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3659 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3660}
9ed74f2d 3661
f0486c68
YZ
3662int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3663 struct btrfs_root *root,
3664 struct btrfs_block_rsv *block_rsv,
8bb8ab2e 3665 u64 num_bytes)
f0486c68
YZ
3666{
3667 int ret;
9ed74f2d 3668
f0486c68
YZ
3669 if (num_bytes == 0)
3670 return 0;
8bb8ab2e
JB
3671
3672 ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
f0486c68
YZ
3673 if (!ret) {
3674 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3675 return 0;
3676 }
9ed74f2d 3677
f0486c68
YZ
3678 return ret;
3679}
9ed74f2d 3680
f0486c68
YZ
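/*
 * make sure the rsv holds at least max(min_reserved, size *
 * min_factor / 10) bytes: refill from the space_info when refill_used
 * is set, or commit the transaction when enough freed-but-pinned
 * bytes would cover the shortfall.
 */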
3681int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3682 struct btrfs_root *root,
3683 struct btrfs_block_rsv *block_rsv,
3684 u64 min_reserved, int min_factor)
3685{
3686 u64 num_bytes = 0;
3687 int commit_trans = 0;
3688 int ret = -ENOSPC;
9ed74f2d 3689
f0486c68
YZ
3690 if (!block_rsv)
3691 return 0;
9ed74f2d 3692
f0486c68
YZ
3693 spin_lock(&block_rsv->lock);
3694 if (min_factor > 0)
3695 num_bytes = div_factor(block_rsv->size, min_factor);
3696 if (min_reserved > num_bytes)
3697 num_bytes = min_reserved;
9ed74f2d 3698
f0486c68
YZ
3699 if (block_rsv->reserved >= num_bytes) {
3700 ret = 0;
3701 } else {
3702 num_bytes -= block_rsv->reserved;
3703 if (block_rsv->durable &&
3704 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3705 commit_trans = 1;
3706 }
3707 spin_unlock(&block_rsv->lock);
3708 if (!ret)
3709 return 0;
3710
3711 if (block_rsv->refill_used) {
8bb8ab2e
JB
3712 ret = reserve_metadata_bytes(trans, root, block_rsv,
3713 num_bytes, 0);
f0486c68
YZ
3714 if (!ret) {
3715 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3716 return 0;
4e06bdd6 3717 }
f0486c68 3718 }
9ed74f2d 3719
f0486c68
YZ
3720 if (commit_trans) {
3721 if (trans)
3722 return -EAGAIN;
3723
3724 trans = btrfs_join_transaction(root, 1);
3725 BUG_ON(IS_ERR(trans));
3726 ret = btrfs_commit_transaction(trans, root);
3727 return ret;
6a63209f 3728 }
9ed74f2d 3729
f0486c68
YZ
3730 WARN_ON(1);
3731 printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
3732 block_rsv->size, block_rsv->reserved,
3733 block_rsv->freed[0], block_rsv->freed[1]);
6a63209f 3734
f0486c68
YZ
3735 return -ENOSPC;
3736}
3737
3738int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3739 struct btrfs_block_rsv *dst_rsv,
3740 u64 num_bytes)
3741{
3742 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3743}
3744
3745void btrfs_block_rsv_release(struct btrfs_root *root,
3746 struct btrfs_block_rsv *block_rsv,
3747 u64 num_bytes)
3748{
3749 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3750 if (global_rsv->full || global_rsv == block_rsv ||
3751 block_rsv->space_info != global_rsv->space_info)
3752 global_rsv = NULL;
3753 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
6a63209f
JB
3754}
3755
3756/*
8929ecfa
YZ
3757 * helper to calculate size of global block reservation.
3758 * the desired value is the sum of the space used by the extent
3759 * tree, checksum tree and root tree
6a63209f 3760 */
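/*
 * rough shape of the estimate below: two copies of the csum bytes
 * needed for every data block, plus ~2% of the used data + metadata,
 * capped at a third of the used metadata space and rounded up to a
 * multiple of (leafsize << 10).
 */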
8929ecfa 3761static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
6a63209f 3762{
8929ecfa
YZ
3763 struct btrfs_space_info *sinfo;
3764 u64 num_bytes;
3765 u64 meta_used;
3766 u64 data_used;
3767 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3768#if 0
3769 /*
3770 * per-tree used space accounting can be inaccurate, so we
3771 * can't rely on it.
3772 */
3773 spin_lock(&fs_info->extent_root->accounting_lock);
3774 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3775 spin_unlock(&fs_info->extent_root->accounting_lock);
6a63209f 3776
8929ecfa
YZ
3777 spin_lock(&fs_info->csum_root->accounting_lock);
3778 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3779 spin_unlock(&fs_info->csum_root->accounting_lock);
6a63209f 3780
8929ecfa
YZ
3781 spin_lock(&fs_info->tree_root->accounting_lock);
3782 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3783 spin_unlock(&fs_info->tree_root->accounting_lock);
3784#endif
3785 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3786 spin_lock(&sinfo->lock);
3787 data_used = sinfo->bytes_used;
3788 spin_unlock(&sinfo->lock);
33b4d47f 3789
8929ecfa
YZ
3790 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3791 spin_lock(&sinfo->lock);
6d48755d
JB
3792 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3793 data_used = 0;
8929ecfa
YZ
3794 meta_used = sinfo->bytes_used;
3795 spin_unlock(&sinfo->lock);
ab6e2410 3796
8929ecfa
YZ
3797 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3798 csum_size * 2;
3799 num_bytes += div64_u64(data_used + meta_used, 50);
4e06bdd6 3800
8929ecfa
YZ
3801 if (num_bytes * 3 > meta_used)
3802 num_bytes = div64_u64(meta_used, 3);
ab6e2410 3803
8929ecfa
YZ
3804 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3805}
6a63209f 3806
8929ecfa
YZ
3807static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3808{
3809 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3810 struct btrfs_space_info *sinfo = block_rsv->space_info;
3811 u64 num_bytes;
6a63209f 3812
8929ecfa 3813 num_bytes = calc_global_metadata_size(fs_info);
33b4d47f 3814
8929ecfa
YZ
3815 spin_lock(&block_rsv->lock);
3816 spin_lock(&sinfo->lock);
4e06bdd6 3817
8929ecfa 3818 block_rsv->size = num_bytes;
4e06bdd6 3819
8929ecfa 3820 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
6d48755d
JB
3821 sinfo->bytes_reserved + sinfo->bytes_readonly +
3822 sinfo->bytes_may_use;
8929ecfa
YZ
3823
3824 if (sinfo->total_bytes > num_bytes) {
3825 num_bytes = sinfo->total_bytes - num_bytes;
3826 block_rsv->reserved += num_bytes;
3827 sinfo->bytes_reserved += num_bytes;
6a63209f 3828 }
6a63209f 3829
8929ecfa
YZ
3830 if (block_rsv->reserved >= block_rsv->size) {
3831 num_bytes = block_rsv->reserved - block_rsv->size;
3832 sinfo->bytes_reserved -= num_bytes;
3833 block_rsv->reserved = block_rsv->size;
3834 block_rsv->full = 1;
3835 }
3836#if 0
3837 printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3838 block_rsv->size, block_rsv->reserved);
3839#endif
3840 spin_unlock(&sinfo->lock);
3841 spin_unlock(&block_rsv->lock);
6a63209f
JB
3842}
3843
f0486c68 3844static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 3845{
f0486c68 3846 struct btrfs_space_info *space_info;
6a63209f 3847
f0486c68
YZ
3848 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3849 fs_info->chunk_block_rsv.space_info = space_info;
3850 fs_info->chunk_block_rsv.priority = 10;
6a63209f 3851
f0486c68 3852 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
8929ecfa
YZ
3853 fs_info->global_block_rsv.space_info = space_info;
3854 fs_info->global_block_rsv.priority = 10;
3855 fs_info->global_block_rsv.refill_used = 1;
3856 fs_info->delalloc_block_rsv.space_info = space_info;
f0486c68
YZ
3857 fs_info->trans_block_rsv.space_info = space_info;
3858 fs_info->empty_block_rsv.space_info = space_info;
3859 fs_info->empty_block_rsv.priority = 10;
3860
8929ecfa
YZ
3861 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3862 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3863 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3864 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
f0486c68 3865 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
8929ecfa
YZ
3866
3867 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3868
3869 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3870
3871 update_global_block_rsv(fs_info);
6a63209f
JB
3872}
3873
8929ecfa 3874static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 3875{
8929ecfa
YZ
3876 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3877 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3878 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3879 WARN_ON(fs_info->trans_block_rsv.size > 0);
3880 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3881 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3882 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
f0486c68 3883}
6a63209f 3884
a22285a6
YZ
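/*
 * worst-case metadata cost of touching num_items items: one full tree
 * path (a leaf plus a node per remaining level) per item, times 3,
 * which is presumably headroom for COW and node splits along the way.
 */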
3885static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3886{
3887 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3888 3 * num_items;
3889}
6a63209f 3890
a22285a6
YZ
3891int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3892 struct btrfs_root *root,
8bb8ab2e 3893 int num_items)
a22285a6
YZ
3894{
3895 u64 num_bytes;
3896 int ret;
6a63209f 3897
a22285a6
YZ
3898 if (num_items == 0 || root->fs_info->chunk_root == root)
3899 return 0;
6a63209f 3900
a22285a6
YZ
3901 num_bytes = calc_trans_metadata_size(root, num_items);
3902 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
8bb8ab2e 3903 num_bytes);
a22285a6
YZ
3904 if (!ret) {
3905 trans->bytes_reserved += num_bytes;
3906 trans->block_rsv = &root->fs_info->trans_block_rsv;
3907 }
3908 return ret;
6a63209f
JB
3909}
3910
a22285a6
YZ
3911void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3912 struct btrfs_root *root)
6a63209f 3913{
a22285a6
YZ
3914 if (!trans->bytes_reserved)
3915 return;
6a63209f 3916
a22285a6
YZ
3917 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3918 btrfs_block_rsv_release(root, trans->block_rsv,
3919 trans->bytes_reserved);
3920 trans->bytes_reserved = 0;
3921}
6a63209f 3922
d68fc57b
YZ
3923int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3924 struct inode *inode)
3925{
3926 struct btrfs_root *root = BTRFS_I(inode)->root;
3927 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3928 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3929
3930 /*
3931 * one for deleting the orphan item, one for updating the inode and
3932 * two for calling btrfs_truncate_inode_items.
3933 *
3934 * btrfs_truncate_inode_items is a delete operation; it frees
3935 * more space than it uses in most cases. So two units of
3936 * metadata space should be enough for calling it many times.
3937 * If all of the metadata space is used, we can commit the
3938 * transaction and use the space it freed.
3939 */
3940 u64 num_bytes = calc_trans_metadata_size(root, 4);
3941 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
6a63209f
JB
3942}
3943
d68fc57b 3944void btrfs_orphan_release_metadata(struct inode *inode)
97e728d4 3945{
d68fc57b
YZ
3946 struct btrfs_root *root = BTRFS_I(inode)->root;
3947 u64 num_bytes = calc_trans_metadata_size(root, 4);
3948 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3949}
97e728d4 3950
a22285a6
YZ
3951int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3952 struct btrfs_pending_snapshot *pending)
3953{
3954 struct btrfs_root *root = pending->root;
3955 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3956 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3957 /*
3958 * two for root back/forward refs, two for directory entries
3959 * and one for root of the snapshot.
3960 */
3961 u64 num_bytes = calc_trans_metadata_size(root, 5);
3962 dst_rsv->space_info = src_rsv->space_info;
3963 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
97e728d4
JB
3964}
3965
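/* crude estimate: csum metadata costs roughly 1/8th of the data bytes */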
0ca1f7ce 3966static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
6324fbf3 3967{
0ca1f7ce
YZ
3968 return num_bytes >> 3;
3969}
c146afad 3970
0ca1f7ce
YZ
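/*
 * reserve metadata space for num_bytes of new delalloc data:
 * outstanding_extents counts extents we may end up writing,
 * reserved_extents what we have already paid for, so only the
 * difference plus a csum estimate is reserved here.
 */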
3971int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3972{
3973 struct btrfs_root *root = BTRFS_I(inode)->root;
3974 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3975 u64 to_reserve;
3976 int nr_extents;
0ca1f7ce 3977 int ret;
6324fbf3 3978
0ca1f7ce
YZ
3979 if (btrfs_transaction_in_commit(root->fs_info))
3980 schedule_timeout(1);
ec44a35c 3981
0ca1f7ce 3982 num_bytes = ALIGN(num_bytes, root->sectorsize);
8bb8ab2e 3983
0ca1f7ce
YZ
3984 spin_lock(&BTRFS_I(inode)->accounting_lock);
3985 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3986 if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3987 nr_extents -= BTRFS_I(inode)->reserved_extents;
3988 to_reserve = calc_trans_metadata_size(root, nr_extents);
3989 } else {
3990 nr_extents = 0;
3991 to_reserve = 0;
593060d7 3992 }
8bb8ab2e 3993 spin_unlock(&BTRFS_I(inode)->accounting_lock);
6324fbf3 3994
0ca1f7ce 3995 to_reserve += calc_csum_metadata_size(inode, num_bytes);
8bb8ab2e
JB
3996 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
3997 if (ret)
0ca1f7ce 3998 return ret;
6324fbf3 3999
8bb8ab2e 4000 spin_lock(&BTRFS_I(inode)->accounting_lock);
0ca1f7ce
YZ
4001 BTRFS_I(inode)->reserved_extents += nr_extents;
4002 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
4003 spin_unlock(&BTRFS_I(inode)->accounting_lock);
25179201 4004
0ca1f7ce
YZ
4005 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4006
4007 if (block_rsv->size > 512 * 1024 * 1024)
0019f10d 4008 shrink_delalloc(NULL, root, to_reserve, 0);
0ca1f7ce
YZ
4009
4010 return 0;
4011}
4012
4013void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4014{
4015 struct btrfs_root *root = BTRFS_I(inode)->root;
4016 u64 to_free;
4017 int nr_extents;
4018
4019 num_bytes = ALIGN(num_bytes, root->sectorsize);
4020 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4021
4022 spin_lock(&BTRFS_I(inode)->accounting_lock);
4023 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4024 if (nr_extents < BTRFS_I(inode)->reserved_extents) {
4025 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
4026 BTRFS_I(inode)->reserved_extents -= nr_extents;
4027 } else {
4028 nr_extents = 0;
97e728d4 4029 }
0ca1f7ce 4030 spin_unlock(&BTRFS_I(inode)->accounting_lock);
97e728d4 4031
0ca1f7ce
YZ
4032 to_free = calc_csum_metadata_size(inode, num_bytes);
4033 if (nr_extents > 0)
4034 to_free += calc_trans_metadata_size(root, nr_extents);
4035
4036 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4037 to_free);
4038}
4039
4040int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4041{
4042 int ret;
4043
4044 ret = btrfs_check_data_free_space(inode, num_bytes);
d397712b 4045 if (ret)
0ca1f7ce
YZ
4046 return ret;
4047
4048 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4049 if (ret) {
4050 btrfs_free_reserved_data_space(inode, num_bytes);
4051 return ret;
4052 }
4053
4054 return 0;
4055}
4056
4057void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4058{
4059 btrfs_delalloc_release_metadata(inode, num_bytes);
4060 btrfs_free_reserved_data_space(inode, num_bytes);
6324fbf3
CM
4061}
4062
9078a3e1
CM
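/*
 * adjust used-byte accounting when an extent is allocated or freed:
 * update the superblock total, then for each block group the range
 * touches move the bytes from reserved to used (alloc) or from used
 * to pinned (free), scaling disk_used by the profile's copy factor.
 */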
4063static int update_block_group(struct btrfs_trans_handle *trans,
4064 struct btrfs_root *root,
f0486c68 4065 u64 bytenr, u64 num_bytes, int alloc)
9078a3e1 4066{
0af3d00b 4067 struct btrfs_block_group_cache *cache = NULL;
9078a3e1 4068 struct btrfs_fs_info *info = root->fs_info;
db94535d 4069 u64 total = num_bytes;
9078a3e1 4070 u64 old_val;
db94535d 4071 u64 byte_in_group;
0af3d00b 4072 int factor;
3e1ad54f 4073
5d4f98a2
YZ
4074 /* block accounting for super block */
4075 spin_lock(&info->delalloc_lock);
4076 old_val = btrfs_super_bytes_used(&info->super_copy);
4077 if (alloc)
4078 old_val += num_bytes;
4079 else
4080 old_val -= num_bytes;
4081 btrfs_set_super_bytes_used(&info->super_copy, old_val);
5d4f98a2
YZ
4082 spin_unlock(&info->delalloc_lock);
4083
d397712b 4084 while (total) {
db94535d 4085 cache = btrfs_lookup_block_group(info, bytenr);
f3465ca4 4086 if (!cache)
9078a3e1 4087 return -1;
b742bb82
YZ
4088 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4089 BTRFS_BLOCK_GROUP_RAID1 |
4090 BTRFS_BLOCK_GROUP_RAID10))
4091 factor = 2;
4092 else
4093 factor = 1;
9d66e233
JB
4094 /*
4095 * If this block group has free space cache written out, we
4096 * need to make sure to load it if we are removing space. This
4097 * is because we need the unpinning stage to actually add the
4098 * space back to the block group, otherwise we will leak space.
4099 */
4100 if (!alloc && cache->cached == BTRFS_CACHE_NO)
b8399dee 4101 cache_block_group(cache, trans, NULL, 1);
0af3d00b 4102
db94535d
CM
4103 byte_in_group = bytenr - cache->key.objectid;
4104 WARN_ON(byte_in_group > cache->key.offset);
9078a3e1 4105
25179201 4106 spin_lock(&cache->space_info->lock);
c286ac48 4107 spin_lock(&cache->lock);
0af3d00b
JB
4108
4109 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4110 cache->disk_cache_state < BTRFS_DC_CLEAR)
4111 cache->disk_cache_state = BTRFS_DC_CLEAR;
4112
0f9dd46c 4113 cache->dirty = 1;
9078a3e1 4114 old_val = btrfs_block_group_used(&cache->item);
db94535d 4115 num_bytes = min(total, cache->key.offset - byte_in_group);
cd1bc465 4116 if (alloc) {
db94535d 4117 old_val += num_bytes;
11833d66
YZ
4118 btrfs_set_block_group_used(&cache->item, old_val);
4119 cache->reserved -= num_bytes;
11833d66 4120 cache->space_info->bytes_reserved -= num_bytes;
b742bb82
YZ
4121 cache->space_info->bytes_used += num_bytes;
4122 cache->space_info->disk_used += num_bytes * factor;
c286ac48 4123 spin_unlock(&cache->lock);
25179201 4124 spin_unlock(&cache->space_info->lock);
cd1bc465 4125 } else {
db94535d 4126 old_val -= num_bytes;
c286ac48 4127 btrfs_set_block_group_used(&cache->item, old_val);
f0486c68
YZ
4128 cache->pinned += num_bytes;
4129 cache->space_info->bytes_pinned += num_bytes;
6324fbf3 4130 cache->space_info->bytes_used -= num_bytes;
b742bb82 4131 cache->space_info->disk_used -= num_bytes * factor;
c286ac48 4132 spin_unlock(&cache->lock);
25179201 4133 spin_unlock(&cache->space_info->lock);
1f3c79a2 4134
f0486c68
YZ
4135 set_extent_dirty(info->pinned_extents,
4136 bytenr, bytenr + num_bytes - 1,
4137 GFP_NOFS | __GFP_NOFAIL);
cd1bc465 4138 }
fa9c0d79 4139 btrfs_put_block_group(cache);
db94535d
CM
4140 total -= num_bytes;
4141 bytenr += num_bytes;
9078a3e1
CM
4142 }
4143 return 0;
4144}
6324fbf3 4145
a061fc8d
CM
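/*
 * return the logical start of the first block group at or after
 * search_start, or 0 if there is none.
 */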
4146static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4147{
0f9dd46c 4148 struct btrfs_block_group_cache *cache;
d2fb3437 4149 u64 bytenr;
0f9dd46c
JB
4150
4151 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4152 if (!cache)
a061fc8d 4153 return 0;
0f9dd46c 4154
d2fb3437 4155 bytenr = cache->key.objectid;
fa9c0d79 4156 btrfs_put_block_group(cache);
d2fb3437
YZ
4157
4158 return bytenr;
a061fc8d
CM
4159}
4160
f0486c68
YZ
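/*
 * account an extent as pinned in the block group and its space_info
 * (dropping it from the reserved counters when asked), and mark the
 * range dirty in pinned_extents so it is handed back to the free
 * space cache once the transaction commits.
 */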
4161static int pin_down_extent(struct btrfs_root *root,
4162 struct btrfs_block_group_cache *cache,
4163 u64 bytenr, u64 num_bytes, int reserved)
324ae4df 4164{
11833d66
YZ
4165 spin_lock(&cache->space_info->lock);
4166 spin_lock(&cache->lock);
4167 cache->pinned += num_bytes;
4168 cache->space_info->bytes_pinned += num_bytes;
4169 if (reserved) {
4170 cache->reserved -= num_bytes;
4171 cache->space_info->bytes_reserved -= num_bytes;
4172 }
4173 spin_unlock(&cache->lock);
4174 spin_unlock(&cache->space_info->lock);
68b38550 4175
f0486c68
YZ
4176 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4177 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4178 return 0;
4179}
68b38550 4180
f0486c68
YZ
4181/*
4182 * this function must be called within transaction
4183 */
4184int btrfs_pin_extent(struct btrfs_root *root,
4185 u64 bytenr, u64 num_bytes, int reserved)
4186{
4187 struct btrfs_block_group_cache *cache;
68b38550 4188
f0486c68
YZ
4189 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4190 BUG_ON(!cache);
4191
4192 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4193
4194 btrfs_put_block_group(cache);
11833d66
YZ
4195 return 0;
4196}
4197
f0486c68
YZ
4198/*
4199 * update the size of reserved extents. this function may return -EAGAIN
4200 * (the block group went read-only) if 'reserve' is true or 'sinfo' is false.
4201 */
4202static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4203 u64 num_bytes, int reserve, int sinfo)
11833d66 4204{
f0486c68
YZ
4205 int ret = 0;
4206 if (sinfo) {
4207 struct btrfs_space_info *space_info = cache->space_info;
4208 spin_lock(&space_info->lock);
4209 spin_lock(&cache->lock);
4210 if (reserve) {
4211 if (cache->ro) {
4212 ret = -EAGAIN;
4213 } else {
4214 cache->reserved += num_bytes;
4215 space_info->bytes_reserved += num_bytes;
4216 }
4217 } else {
4218 if (cache->ro)
4219 space_info->bytes_readonly += num_bytes;
4220 cache->reserved -= num_bytes;
4221 space_info->bytes_reserved -= num_bytes;
4222 }
4223 spin_unlock(&cache->lock);
4224 spin_unlock(&space_info->lock);
11833d66 4225 } else {
f0486c68
YZ
4226 spin_lock(&cache->lock);
4227 if (cache->ro) {
4228 ret = -EAGAIN;
4229 } else {
4230 if (reserve)
4231 cache->reserved += num_bytes;
4232 else
4233 cache->reserved -= num_bytes;
4234 }
4235 spin_unlock(&cache->lock);
324ae4df 4236 }
f0486c68 4237 return ret;
324ae4df 4238}
9078a3e1 4239
11833d66
YZ
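/*
 * called around transaction commit: record each caching block group's
 * progress in last_byte_to_unpin and flip pinned_extents to the other
 * freed_extents tree, keeping extents pinned by the committing
 * transaction separate from those pinned by the next one.
 */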
4240int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4241 struct btrfs_root *root)
e8569813 4242{
e8569813 4243 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66
YZ
4244 struct btrfs_caching_control *next;
4245 struct btrfs_caching_control *caching_ctl;
4246 struct btrfs_block_group_cache *cache;
e8569813 4247
11833d66 4248 down_write(&fs_info->extent_commit_sem);
25179201 4249
11833d66
YZ
4250 list_for_each_entry_safe(caching_ctl, next,
4251 &fs_info->caching_block_groups, list) {
4252 cache = caching_ctl->block_group;
4253 if (block_group_cache_done(cache)) {
4254 cache->last_byte_to_unpin = (u64)-1;
4255 list_del_init(&caching_ctl->list);
4256 put_caching_control(caching_ctl);
e8569813 4257 } else {
11833d66 4258 cache->last_byte_to_unpin = caching_ctl->progress;
e8569813 4259 }
e8569813 4260 }
11833d66
YZ
4261
4262 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4263 fs_info->pinned_extents = &fs_info->freed_extents[1];
4264 else
4265 fs_info->pinned_extents = &fs_info->freed_extents[0];
4266
4267 up_write(&fs_info->extent_commit_sem);
8929ecfa
YZ
4268
4269 update_global_block_rsv(fs_info);
e8569813
ZY
4270 return 0;
4271}
4272
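/*
 * walk the block groups covering [start, end], giving the space back
 * to the free space cache (up to each group's last_byte_to_unpin) and
 * dropping the pinned counters; bytes in read-only groups are counted
 * as readonly, and reserved_pinned bytes become reserved again.
 */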
11833d66 4273static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
ccd467d6 4274{
11833d66
YZ
4275 struct btrfs_fs_info *fs_info = root->fs_info;
4276 struct btrfs_block_group_cache *cache = NULL;
4277 u64 len;
ccd467d6 4278
11833d66
YZ
4279 while (start <= end) {
4280 if (!cache ||
4281 start >= cache->key.objectid + cache->key.offset) {
4282 if (cache)
4283 btrfs_put_block_group(cache);
4284 cache = btrfs_lookup_block_group(fs_info, start);
4285 BUG_ON(!cache);
4286 }
4287
4288 len = cache->key.objectid + cache->key.offset - start;
4289 len = min(len, end + 1 - start);
4290
4291 if (start < cache->last_byte_to_unpin) {
4292 len = min(len, cache->last_byte_to_unpin - start);
4293 btrfs_add_free_space(cache, start, len);
4294 }
4295
f0486c68
YZ
4296 start += len;
4297
11833d66
YZ
4298 spin_lock(&cache->space_info->lock);
4299 spin_lock(&cache->lock);
4300 cache->pinned -= len;
4301 cache->space_info->bytes_pinned -= len;
f0486c68
YZ
4302 if (cache->ro) {
4303 cache->space_info->bytes_readonly += len;
4304 } else if (cache->reserved_pinned > 0) {
4305 len = min(len, cache->reserved_pinned);
4306 cache->reserved_pinned -= len;
4307 cache->space_info->bytes_reserved += len;
4308 }
11833d66
YZ
4309 spin_unlock(&cache->lock);
4310 spin_unlock(&cache->space_info->lock);
ccd467d6 4311 }
11833d66
YZ
4312
4313 if (cache)
4314 btrfs_put_block_group(cache);
ccd467d6
CM
4315 return 0;
4316}
4317
4318int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
11833d66 4319 struct btrfs_root *root)
a28ec197 4320{
11833d66
YZ
4321 struct btrfs_fs_info *fs_info = root->fs_info;
4322 struct extent_io_tree *unpin;
f0486c68
YZ
4323 struct btrfs_block_rsv *block_rsv;
4324 struct btrfs_block_rsv *next_rsv;
1a5bc167
CM
4325 u64 start;
4326 u64 end;
f0486c68 4327 int idx;
a28ec197 4328 int ret;
a28ec197 4329
11833d66
YZ
4330 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4331 unpin = &fs_info->freed_extents[1];
4332 else
4333 unpin = &fs_info->freed_extents[0];
4334
d397712b 4335 while (1) {
1a5bc167
CM
4336 ret = find_first_extent_bit(unpin, 0, &start, &end,
4337 EXTENT_DIRTY);
4338 if (ret)
a28ec197 4339 break;
1f3c79a2
LH
4340
4341 ret = btrfs_discard_extent(root, start, end + 1 - start);
4342
1a5bc167 4343 clear_extent_dirty(unpin, start, end, GFP_NOFS);
11833d66 4344 unpin_extent_range(root, start, end);
b9473439 4345 cond_resched();
a28ec197 4346 }
817d52f8 4347
f0486c68
YZ
4348 mutex_lock(&fs_info->durable_block_rsv_mutex);
4349 list_for_each_entry_safe(block_rsv, next_rsv,
4350 &fs_info->durable_block_rsv_list, list) {
444528b3 4351
f0486c68
YZ
4352 idx = trans->transid & 0x1;
4353 if (block_rsv->freed[idx] > 0) {
4354 block_rsv_add_bytes(block_rsv,
4355 block_rsv->freed[idx], 0);
4356 block_rsv->freed[idx] = 0;
4357 }
4358 if (atomic_read(&block_rsv->usage) == 0) {
4359 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
31840ae1 4360
f0486c68
YZ
4361 if (block_rsv->freed[0] == 0 &&
4362 block_rsv->freed[1] == 0) {
4363 list_del_init(&block_rsv->list);
4364 kfree(block_rsv);
4365 }
4366 } else {
4367 btrfs_block_rsv_release(root, block_rsv, 0);
8ef97622 4368 }
f4b9aa8d 4369 }
f0486c68 4370 mutex_unlock(&fs_info->durable_block_rsv_mutex);
31840ae1 4371
e20d96d6
CM
4372 return 0;
4373}
4374
5d4f98a2
YZ
4375static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4376 struct btrfs_root *root,
4377 u64 bytenr, u64 num_bytes, u64 parent,
4378 u64 root_objectid, u64 owner_objectid,
4379 u64 owner_offset, int refs_to_drop,
4380 struct btrfs_delayed_extent_op *extent_op)
a28ec197 4381{
e2fa7227 4382 struct btrfs_key key;
5d4f98a2 4383 struct btrfs_path *path;
1261ec42
CM
4384 struct btrfs_fs_info *info = root->fs_info;
4385 struct btrfs_root *extent_root = info->extent_root;
5f39d397 4386 struct extent_buffer *leaf;
5d4f98a2
YZ
4387 struct btrfs_extent_item *ei;
4388 struct btrfs_extent_inline_ref *iref;
a28ec197 4389 int ret;
5d4f98a2 4390 int is_data;
952fccac
CM
4391 int extent_slot = 0;
4392 int found_extent = 0;
4393 int num_to_del = 1;
5d4f98a2
YZ
4394 u32 item_size;
4395 u64 refs;
037e6390 4396
5caf2a00 4397 path = btrfs_alloc_path();
54aa1f4d
CM
4398 if (!path)
4399 return -ENOMEM;
5f26f772 4400
3c12ac72 4401 path->reada = 1;
b9473439 4402 path->leave_spinning = 1;
5d4f98a2
YZ
4403
4404 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4405 BUG_ON(!is_data && refs_to_drop != 1);
4406
4407 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4408 bytenr, num_bytes, parent,
4409 root_objectid, owner_objectid,
4410 owner_offset);
7bb86316 4411 if (ret == 0) {
952fccac 4412 extent_slot = path->slots[0];
5d4f98a2
YZ
4413 while (extent_slot >= 0) {
4414 btrfs_item_key_to_cpu(path->nodes[0], &key,
952fccac 4415 extent_slot);
5d4f98a2 4416 if (key.objectid != bytenr)
952fccac 4417 break;
5d4f98a2
YZ
4418 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4419 key.offset == num_bytes) {
952fccac
CM
4420 found_extent = 1;
4421 break;
4422 }
4423 if (path->slots[0] - extent_slot > 5)
4424 break;
5d4f98a2 4425 extent_slot--;
952fccac 4426 }
5d4f98a2
YZ
4427#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4428 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4429 if (found_extent && item_size < sizeof(*ei))
4430 found_extent = 0;
4431#endif
31840ae1 4432 if (!found_extent) {
5d4f98a2 4433 BUG_ON(iref);
56bec294 4434 ret = remove_extent_backref(trans, extent_root, path,
5d4f98a2
YZ
4435 NULL, refs_to_drop,
4436 is_data);
31840ae1
ZY
4437 BUG_ON(ret);
4438 btrfs_release_path(extent_root, path);
b9473439 4439 path->leave_spinning = 1;
5d4f98a2
YZ
4440
4441 key.objectid = bytenr;
4442 key.type = BTRFS_EXTENT_ITEM_KEY;
4443 key.offset = num_bytes;
4444
31840ae1
ZY
4445 ret = btrfs_search_slot(trans, extent_root,
4446 &key, path, -1, 1);
f3465ca4
JB
4447 if (ret) {
4448 printk(KERN_ERR "umm, got %d back from search"
d397712b
CM
4449 ", was looking for %llu\n", ret,
4450 (unsigned long long)bytenr);
f3465ca4
JB
4451 btrfs_print_leaf(extent_root, path->nodes[0]);
4452 }
31840ae1
ZY
4453 BUG_ON(ret);
4454 extent_slot = path->slots[0];
4455 }
7bb86316
CM
4456 } else {
4457 btrfs_print_leaf(extent_root, path->nodes[0]);
4458 WARN_ON(1);
d397712b 4459 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5d4f98a2 4460 "parent %llu root %llu owner %llu offset %llu\n",
d397712b 4461 (unsigned long long)bytenr,
56bec294 4462 (unsigned long long)parent,
d397712b 4463 (unsigned long long)root_objectid,
5d4f98a2
YZ
4464 (unsigned long long)owner_objectid,
4465 (unsigned long long)owner_offset);
7bb86316 4466 }
5f39d397
CM
4467
4468 leaf = path->nodes[0];
5d4f98a2
YZ
4469 item_size = btrfs_item_size_nr(leaf, extent_slot);
4470#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4471 if (item_size < sizeof(*ei)) {
4472 BUG_ON(found_extent || extent_slot != path->slots[0]);
4473 ret = convert_extent_item_v0(trans, extent_root, path,
4474 owner_objectid, 0);
4475 BUG_ON(ret < 0);
4476
4477 btrfs_release_path(extent_root, path);
4478 path->leave_spinning = 1;
4479
4480 key.objectid = bytenr;
4481 key.type = BTRFS_EXTENT_ITEM_KEY;
4482 key.offset = num_bytes;
4483
4484 ret = btrfs_search_slot(trans, extent_root, &key, path,
4485 -1, 1);
4486 if (ret) {
4487 printk(KERN_ERR "umm, got %d back from search"
4488 ", was looking for %llu\n", ret,
4489 (unsigned long long)bytenr);
4490 btrfs_print_leaf(extent_root, path->nodes[0]);
4491 }
4492 BUG_ON(ret);
4493 extent_slot = path->slots[0];
4494 leaf = path->nodes[0];
4495 item_size = btrfs_item_size_nr(leaf, extent_slot);
4496 }
4497#endif
4498 BUG_ON(item_size < sizeof(*ei));
952fccac 4499 ei = btrfs_item_ptr(leaf, extent_slot,
123abc88 4500 struct btrfs_extent_item);
5d4f98a2
YZ
4501 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4502 struct btrfs_tree_block_info *bi;
4503 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4504 bi = (struct btrfs_tree_block_info *)(ei + 1);
4505 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4506 }
56bec294 4507
5d4f98a2 4508 refs = btrfs_extent_refs(leaf, ei);
56bec294
CM
4509 BUG_ON(refs < refs_to_drop);
4510 refs -= refs_to_drop;
5f39d397 4511
5d4f98a2
YZ
4512 if (refs > 0) {
4513 if (extent_op)
4514 __run_delayed_extent_op(extent_op, leaf, ei);
4515 /*
4516 * In the case of inline back ref, reference count will
4517 * be updated by remove_extent_backref
952fccac 4518 */
5d4f98a2
YZ
4519 if (iref) {
4520 BUG_ON(!found_extent);
4521 } else {
4522 btrfs_set_extent_refs(leaf, ei, refs);
4523 btrfs_mark_buffer_dirty(leaf);
4524 }
4525 if (found_extent) {
4526 ret = remove_extent_backref(trans, extent_root, path,
4527 iref, refs_to_drop,
4528 is_data);
952fccac
CM
4529 BUG_ON(ret);
4530 }
5d4f98a2 4531 } else {
5d4f98a2
YZ
4532 if (found_extent) {
4533 BUG_ON(is_data && refs_to_drop !=
4534 extent_data_ref_count(root, path, iref));
4535 if (iref) {
4536 BUG_ON(path->slots[0] != extent_slot);
4537 } else {
4538 BUG_ON(path->slots[0] != extent_slot + 1);
4539 path->slots[0] = extent_slot;
4540 num_to_del = 2;
4541 }
78fae27e 4542 }
b9473439 4543
952fccac
CM
4544 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4545 num_to_del);
31840ae1 4546 BUG_ON(ret);
25179201 4547 btrfs_release_path(extent_root, path);
21af804c 4548
5d4f98a2 4549 if (is_data) {
459931ec
CM
4550 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4551 BUG_ON(ret);
d57e62b8
CM
4552 } else {
4553 invalidate_mapping_pages(info->btree_inode->i_mapping,
4554 bytenr >> PAGE_CACHE_SHIFT,
4555 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
459931ec
CM
4556 }
4557
f0486c68 4558 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
dcbdd4dc 4559 BUG_ON(ret);
a28ec197 4560 }
5caf2a00 4561 btrfs_free_path(path);
a28ec197
CM
4562 return ret;
4563}
4564
1887be66 4565/*
f0486c68 4566 * when we free a block, it is possible (and likely) that we free the last
1887be66
CM
4567 * delayed ref for that extent as well. This searches the delayed ref tree for
4568 * a given extent, and if there are no other delayed refs to be processed, it
4569 * removes it from the tree.
4570 */
4571static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4572 struct btrfs_root *root, u64 bytenr)
4573{
4574 struct btrfs_delayed_ref_head *head;
4575 struct btrfs_delayed_ref_root *delayed_refs;
4576 struct btrfs_delayed_ref_node *ref;
4577 struct rb_node *node;
f0486c68 4578 int ret = 0;
1887be66
CM
4579
4580 delayed_refs = &trans->transaction->delayed_refs;
4581 spin_lock(&delayed_refs->lock);
4582 head = btrfs_find_delayed_ref_head(trans, bytenr);
4583 if (!head)
4584 goto out;
4585
4586 node = rb_prev(&head->node.rb_node);
4587 if (!node)
4588 goto out;
4589
4590 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4591
4592 /* there are still entries for this ref, we can't drop it */
4593 if (ref->bytenr == bytenr)
4594 goto out;
4595
5d4f98a2
YZ
4596 if (head->extent_op) {
4597 if (!head->must_insert_reserved)
4598 goto out;
4599 kfree(head->extent_op);
4600 head->extent_op = NULL;
4601 }
4602
1887be66
CM
4603 /*
4604 * waiting for the lock here would deadlock. If someone else has it
4605 * locked they are already in the process of dropping it anyway
4606 */
4607 if (!mutex_trylock(&head->mutex))
4608 goto out;
4609
4610 /*
4611 * at this point we have a head with no other entries. Go
4612 * ahead and process it.
4613 */
4614 head->node.in_tree = 0;
4615 rb_erase(&head->node.rb_node, &delayed_refs->root);
c3e69d58 4616
1887be66
CM
4617 delayed_refs->num_entries--;
4618
4619 /*
4620 * we don't take a ref on the node because we're removing it from the
4621 * tree, so we just steal the ref the tree was holding.
4622 */
c3e69d58
CM
4623 delayed_refs->num_heads--;
4624 if (list_empty(&head->cluster))
4625 delayed_refs->num_heads_ready--;
4626
4627 list_del_init(&head->cluster);
1887be66
CM
4628 spin_unlock(&delayed_refs->lock);
4629
f0486c68
YZ
4630 BUG_ON(head->extent_op);
4631 if (head->must_insert_reserved)
4632 ret = 1;
4633
4634 mutex_unlock(&head->mutex);
1887be66 4635 btrfs_put_delayed_ref(&head->node);
f0486c68 4636 return ret;
1887be66
CM
4637out:
4638 spin_unlock(&delayed_refs->lock);
4639 return 0;
4640}
4641
f0486c68
YZ
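/*
 * drop our reference on a tree block that was removed from the tree.
 * non-log trees queue a delayed ref; on the last ref, blocks created
 * in the running transaction and never written can be freed right
 * away, anything else stays pinned until the transaction commits.
 */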
4642void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4643 struct btrfs_root *root,
4644 struct extent_buffer *buf,
4645 u64 parent, int last_ref)
4646{
4647 struct btrfs_block_rsv *block_rsv;
4648 struct btrfs_block_group_cache *cache = NULL;
4649 int ret;
4650
4651 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4652 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4653 parent, root->root_key.objectid,
4654 btrfs_header_level(buf),
4655 BTRFS_DROP_DELAYED_REF, NULL);
4656 BUG_ON(ret);
4657 }
4658
4659 if (!last_ref)
4660 return;
4661
4662 block_rsv = get_block_rsv(trans, root);
4663 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
3bf84a5a
YZ
4664 if (block_rsv->space_info != cache->space_info)
4665 goto out;
f0486c68
YZ
4666
4667 if (btrfs_header_generation(buf) == trans->transid) {
4668 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4669 ret = check_ref_cleanup(trans, root, buf->start);
4670 if (!ret)
4671 goto pin;
4672 }
4673
4674 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4675 pin_down_extent(root, cache, buf->start, buf->len, 1);
4676 goto pin;
4677 }
4678
4679 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4680
4681 btrfs_add_free_space(cache, buf->start, buf->len);
4682 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4683 if (ret == -EAGAIN) {
4684 /* block group became read-only */
4685 update_reserved_bytes(cache, buf->len, 0, 1);
4686 goto out;
4687 }
4688
4689 ret = 1;
4690 spin_lock(&block_rsv->lock);
4691 if (block_rsv->reserved < block_rsv->size) {
4692 block_rsv->reserved += buf->len;
4693 ret = 0;
4694 }
4695 spin_unlock(&block_rsv->lock);
4696
4697 if (ret) {
4698 spin_lock(&cache->space_info->lock);
4699 cache->space_info->bytes_reserved -= buf->len;
4700 spin_unlock(&cache->space_info->lock);
4701 }
4702 goto out;
4703 }
4704pin:
4705 if (block_rsv->durable && !cache->ro) {
4706 ret = 0;
4707 spin_lock(&cache->lock);
4708 if (!cache->ro) {
4709 cache->reserved_pinned += buf->len;
4710 ret = 1;
4711 }
4712 spin_unlock(&cache->lock);
4713
4714 if (ret) {
4715 spin_lock(&block_rsv->lock);
4716 block_rsv->freed[trans->transid & 0x1] += buf->len;
4717 spin_unlock(&block_rsv->lock);
4718 }
4719 }
4720out:
4721 btrfs_put_block_group(cache);
4722}
4723
925baedd 4724int btrfs_free_extent(struct btrfs_trans_handle *trans,
31840ae1
ZY
4725 struct btrfs_root *root,
4726 u64 bytenr, u64 num_bytes, u64 parent,
5d4f98a2 4727 u64 root_objectid, u64 owner, u64 offset)
925baedd
CM
4728{
4729 int ret;
4730
56bec294
CM
4731 /*
4732 * tree log blocks never actually go into the extent allocation
4733 * tree, just update pinning info and exit early.
56bec294 4734 */
5d4f98a2
YZ
4735 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4736 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
b9473439 4737 /* unlocks the pinned mutex */
11833d66 4738 btrfs_pin_extent(root, bytenr, num_bytes, 1);
56bec294 4739 ret = 0;
5d4f98a2
YZ
4740 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4741 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4742 parent, root_objectid, (int)owner,
4743 BTRFS_DROP_DELAYED_REF, NULL);
1887be66 4744 BUG_ON(ret);
5d4f98a2
YZ
4745 } else {
4746 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4747 parent, root_objectid, owner,
4748 offset, BTRFS_DROP_DELAYED_REF, NULL);
4749 BUG_ON(ret);
56bec294 4750 }
925baedd
CM
4751 return ret;
4752}
4753
87ee04eb
CM
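/*
 * round val up to the next stripesize boundary, e.g. with a 64K
 * stripe a val of 65537 becomes 131072.
 */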
4754static u64 stripe_align(struct btrfs_root *root, u64 val)
4755{
4756 u64 mask = ((u64)root->stripesize - 1);
4757 u64 ret = (val + mask) & ~mask;
4758 return ret;
4759}
4760
817d52f8
JB
4761/*
4762 * when we wait for progress in the block group caching, it's because
4763 * our allocation attempt failed at least once. So, we must sleep
4764 * and let some progress happen before we try again.
4765 *
4766 * This function will sleep at least once waiting for new free space to
4767 * show up, and then it will check the block group free space numbers
4768 * for our min num_bytes. Another option is to have it go ahead
4769 * and look in the rbtree for a free extent of a given size, but this
4770 * is a good start.
4771 */
4772static noinline int
4773wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4774 u64 num_bytes)
4775{
11833d66 4776 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
4777 DEFINE_WAIT(wait);
4778
11833d66
YZ
4779 caching_ctl = get_caching_control(cache);
4780 if (!caching_ctl)
817d52f8 4781 return 0;
817d52f8 4782
11833d66 4783 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
817d52f8 4784 (cache->free_space >= num_bytes));
11833d66
YZ
4785
4786 put_caching_control(caching_ctl);
4787 return 0;
4788}
4789
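/* block until the block group's free space caching has fully finished */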
4790static noinline int
4791wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4792{
4793 struct btrfs_caching_control *caching_ctl;
4794 DEFINE_WAIT(wait);
4795
4796 caching_ctl = get_caching_control(cache);
4797 if (!caching_ctl)
4798 return 0;
4799
4800 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4801
4802 put_caching_control(caching_ctl);
817d52f8
JB
4803 return 0;
4804}
4805
b742bb82
YZ
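/*
 * map a block group's raid flags to its slot in
 * space_info->block_groups[]: raid10, raid1, dup, raid0, then single;
 * find_free_extent resumes its search at this index and walks upward.
 */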
4806static int get_block_group_index(struct btrfs_block_group_cache *cache)
4807{
4808 int index;
4809 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4810 index = 0;
4811 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4812 index = 1;
4813 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4814 index = 2;
4815 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4816 index = 3;
4817 else
4818 index = 4;
4819 return index;
4820}
4821
817d52f8 4822enum btrfs_loop_type {
ccf0e725 4823 LOOP_FIND_IDEAL = 0,
817d52f8
JB
4824 LOOP_CACHING_NOWAIT = 1,
4825 LOOP_CACHING_WAIT = 2,
4826 LOOP_ALLOC_CHUNK = 3,
4827 LOOP_NO_EMPTY_SIZE = 4,
4828};
4829
fec577fb
CM
4830/*
4831 * walks the btree of allocated extents and finds a hole of a given size.
4832 * The key ins is changed to record the hole:
4833 * ins->objectid == block start
62e2749e 4834 * ins->flags = BTRFS_EXTENT_ITEM_KEY
fec577fb
CM
4835 * ins->offset == number of blocks
4836 * Any available blocks before search_start are skipped.
4837 */
d397712b 4838static noinline int find_free_extent(struct btrfs_trans_handle *trans,
98ed5174
CM
4839 struct btrfs_root *orig_root,
4840 u64 num_bytes, u64 empty_size,
4841 u64 search_start, u64 search_end,
4842 u64 hint_byte, struct btrfs_key *ins,
98ed5174 4843 int data)
fec577fb 4844{
80eb234a 4845 int ret = 0;
d397712b 4846 struct btrfs_root *root = orig_root->fs_info->extent_root;
fa9c0d79 4847 struct btrfs_free_cluster *last_ptr = NULL;
80eb234a 4848 struct btrfs_block_group_cache *block_group = NULL;
239b14b3 4849 int empty_cluster = 2 * 1024 * 1024;
0ef3e66b 4850 int allowed_chunk_alloc = 0;
ccf0e725 4851 int done_chunk_alloc = 0;
80eb234a 4852 struct btrfs_space_info *space_info;
fa9c0d79
CM
4853 int last_ptr_loop = 0;
4854 int loop = 0;
f0486c68 4855 int index = 0;
817d52f8 4856 bool found_uncached_bg = false;
0a24325e 4857 bool failed_cluster_refill = false;
1cdda9b8 4858 bool failed_alloc = false;
67377734 4859 bool use_cluster = true;
ccf0e725
JB
4860 u64 ideal_cache_percent = 0;
4861 u64 ideal_cache_offset = 0;
fec577fb 4862
db94535d 4863 WARN_ON(num_bytes < root->sectorsize);
b1a4d965 4864 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
80eb234a
JB
4865 ins->objectid = 0;
4866 ins->offset = 0;
b1a4d965 4867
2552d17e 4868 space_info = __find_space_info(root->fs_info, data);
1b1d1f66
JB
4869 if (!space_info) {
4870 printk(KERN_ERR "No space info for %d\n", data);
4871 return -ENOSPC;
4872 }
2552d17e 4873
67377734
JB
4874 /*
4875 * If the space info is for both data and metadata it means we have a
4876 * small filesystem and we can't use the clustering stuff.
4877 */
4878 if (btrfs_mixed_space_info(space_info))
4879 use_cluster = false;
4880
0ef3e66b
CM
4881 if (orig_root->ref_cows || empty_size)
4882 allowed_chunk_alloc = 1;
4883
67377734 4884 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
fa9c0d79 4885 last_ptr = &root->fs_info->meta_alloc_cluster;
536ac8ae
CM
4886 if (!btrfs_test_opt(root, SSD))
4887 empty_cluster = 64 * 1024;
239b14b3
CM
4888 }
4889
67377734
JB
4890 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4891 btrfs_test_opt(root, SSD)) {
fa9c0d79
CM
4892 last_ptr = &root->fs_info->data_alloc_cluster;
4893 }
0f9dd46c 4894
239b14b3 4895 if (last_ptr) {
fa9c0d79
CM
4896 spin_lock(&last_ptr->lock);
4897 if (last_ptr->block_group)
4898 hint_byte = last_ptr->window_start;
4899 spin_unlock(&last_ptr->lock);
239b14b3 4900 }
fa9c0d79 4901
a061fc8d 4902 search_start = max(search_start, first_logical_byte(root, 0));
239b14b3 4903 search_start = max(search_start, hint_byte);
0b86a832 4904
817d52f8 4905 if (!last_ptr)
fa9c0d79 4906 empty_cluster = 0;
fa9c0d79 4907
2552d17e 4908 if (search_start == hint_byte) {
ccf0e725 4909ideal_cache:
2552d17e
JB
4910 block_group = btrfs_lookup_block_group(root->fs_info,
4911 search_start);
817d52f8
JB
4912 /*
4913 * we don't want to use the block group if it doesn't match our
4914 * allocation bits, or if it's not cached.
ccf0e725
JB
4915 *
4916 * However if we are re-searching with an ideal block group
4917 * picked out then we don't care that the block group is cached.
817d52f8
JB
4918 */
4919 if (block_group && block_group_bits(block_group, data) &&
ccf0e725
JB
4920 (block_group->cached != BTRFS_CACHE_NO ||
4921 search_start == ideal_cache_offset)) {
2552d17e 4922 down_read(&space_info->groups_sem);
44fb5511
CM
4923 if (list_empty(&block_group->list) ||
4924 block_group->ro) {
4925 /*
4926 * someone is removing this block group,
4927 * we can't jump into the have_block_group
4928 * target because our list pointers are not
4929 * valid
4930 */
4931 btrfs_put_block_group(block_group);
4932 up_read(&space_info->groups_sem);
ccf0e725 4933 } else {
b742bb82 4934 index = get_block_group_index(block_group);
44fb5511 4935 goto have_block_group;
ccf0e725 4936 }
2552d17e 4937 } else if (block_group) {
fa9c0d79 4938 btrfs_put_block_group(block_group);
2552d17e 4939 }
42e70e7a 4940 }
2552d17e 4941search:
80eb234a 4942 down_read(&space_info->groups_sem);
b742bb82
YZ
4943 list_for_each_entry(block_group, &space_info->block_groups[index],
4944 list) {
6226cb0a 4945 u64 offset;
817d52f8 4946 int cached;
8a1413a2 4947
11dfe35a 4948 btrfs_get_block_group(block_group);
2552d17e 4949 search_start = block_group->key.objectid;
42e70e7a 4950
83a50de9
CM
4951 /*
4952 * this can happen if we end up cycling through all the
4953 * raid types, but we want to make sure we only allocate
4954 * for the proper type.
4955 */
4956 if (!block_group_bits(block_group, data)) {
4957 u64 extra = BTRFS_BLOCK_GROUP_DUP |
4958 BTRFS_BLOCK_GROUP_RAID1 |
4959 BTRFS_BLOCK_GROUP_RAID10;
4960
4961 /*
4962 * if they asked for extra copies and this block group
4963 * doesn't provide them, bail. This does allow us to
4964 * fill raid0 from raid1.
4965 */
4966 if ((data & extra) && !(block_group->flags & extra))
4967 goto loop;
4968 }
4969
2552d17e 4970have_block_group:
817d52f8 4971 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
ccf0e725
JB
4972 u64 free_percent;
4973
b8399dee
JB
4974 ret = cache_block_group(block_group, trans,
4975 orig_root, 1);
9d66e233
JB
4976 if (block_group->cached == BTRFS_CACHE_FINISHED)
4977 goto have_block_group;
4978
ccf0e725
JB
4979 free_percent = btrfs_block_group_used(&block_group->item);
4980 free_percent *= 100;
4981 free_percent = div64_u64(free_percent,
4982 block_group->key.offset);
4983 free_percent = 100 - free_percent;
4984 if (free_percent > ideal_cache_percent &&
4985 likely(!block_group->ro)) {
4986 ideal_cache_offset = block_group->key.objectid;
4987 ideal_cache_percent = free_percent;
4988 }
4989
817d52f8 4990 /*
ccf0e725
JB
4991 * We only want to start kthread caching if we are at
4992 * the point where we will wait for caching to make
4993 * progress, or if our ideal search is over and we've
4994 * found somebody to start caching.
817d52f8
JB
4995 */
4996 if (loop > LOOP_CACHING_NOWAIT ||
ccf0e725
JB
4997 (loop > LOOP_FIND_IDEAL &&
4998 atomic_read(&space_info->caching_threads) < 2)) {
b8399dee
JB
4999 ret = cache_block_group(block_group, trans,
5000 orig_root, 0);
817d52f8 5001 BUG_ON(ret);
2552d17e 5002 }
817d52f8
JB
5003 found_uncached_bg = true;
5004
ccf0e725
JB
5005 /*
5006 * If loop is set for cached only, try the next block
5007 * group.
5008 */
5009 if (loop == LOOP_FIND_IDEAL)
817d52f8
JB
5010 goto loop;
5011 }
5012
ccf0e725
JB
5013 cached = block_group_cache_done(block_group);
5014 if (unlikely(!cached))
5015 found_uncached_bg = true;
5016
ea6a478e 5017 if (unlikely(block_group->ro))
2552d17e 5018 goto loop;
0f9dd46c 5019
0a24325e
JB
5020 /*
5021 * Ok, we want to try to use the cluster allocator, so let's look
5022 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
5023 * have tried the cluster allocator plenty of times at this
5024 * point and not have found anything, so we are likely way too
5025 * fragmented for the clustering stuff to find anything, so let's
5026 * just skip it and let the allocator find whatever block it can
5027 * find
5028 */
5029 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79
CM
5030 /*
5031 * the refill lock keeps out other
5032 * people trying to start a new cluster
5033 */
5034 spin_lock(&last_ptr->refill_lock);
44fb5511
CM
5035 if (last_ptr->block_group &&
5036 (last_ptr->block_group->ro ||
5037 !block_group_bits(last_ptr->block_group, data))) {
5038 offset = 0;
5039 goto refill_cluster;
5040 }
5041
fa9c0d79
CM
5042 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5043 num_bytes, search_start);
5044 if (offset) {
5045 /* we have a block, we're done */
5046 spin_unlock(&last_ptr->refill_lock);
5047 goto checks;
5048 }
5049
5050 spin_lock(&last_ptr->lock);
5051 /*
5052 * whoops, this cluster doesn't actually point to
5053 * this block group. Get a ref on the block
5054 * group it does point to and try again
5055 */
5056 if (!last_ptr_loop && last_ptr->block_group &&
5057 last_ptr->block_group != block_group) {
5058
5059 btrfs_put_block_group(block_group);
5060 block_group = last_ptr->block_group;
11dfe35a 5061 btrfs_get_block_group(block_group);
fa9c0d79
CM
5062 spin_unlock(&last_ptr->lock);
5063 spin_unlock(&last_ptr->refill_lock);
5064
5065 last_ptr_loop = 1;
5066 search_start = block_group->key.objectid;
44fb5511
CM
5067 /*
5068 * we know this block group is properly
5069 * in the list because
5070 * btrfs_remove_block_group drops the
5071 * cluster before it removes the block
5072 * group from the list
5073 */
fa9c0d79
CM
5074 goto have_block_group;
5075 }
5076 spin_unlock(&last_ptr->lock);
44fb5511 5077refill_cluster:
fa9c0d79
CM
5078 /*
5079 * this cluster didn't work out, free it and
5080 * start over
5081 */
5082 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5083
5084 last_ptr_loop = 0;
5085
5086 /* allocate a cluster in this block group */
451d7585 5087 ret = btrfs_find_space_cluster(trans, root,
fa9c0d79
CM
5088 block_group, last_ptr,
5089 offset, num_bytes,
5090 empty_cluster + empty_size);
5091 if (ret == 0) {
5092 /*
5093 * now pull our allocation out of this
5094 * cluster
5095 */
5096 offset = btrfs_alloc_from_cluster(block_group,
5097 last_ptr, num_bytes,
5098 search_start);
5099 if (offset) {
5100 /* we found one, proceed */
5101 spin_unlock(&last_ptr->refill_lock);
5102 goto checks;
5103 }
0a24325e
JB
5104 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5105 && !failed_cluster_refill) {
817d52f8
JB
5106 spin_unlock(&last_ptr->refill_lock);
5107
0a24325e 5108 failed_cluster_refill = true;
817d52f8
JB
5109 wait_block_group_cache_progress(block_group,
5110 num_bytes + empty_cluster + empty_size);
5111 goto have_block_group;
fa9c0d79 5112 }
817d52f8 5113
fa9c0d79
CM
5114 /*
5115 * at this point we either didn't find a cluster
5116 * or we weren't able to allocate a block from our
5117 * cluster. Free the cluster we've been trying
5118 * to use, and go to the next block group
5119 */
0a24325e 5120 btrfs_return_cluster_to_free_space(NULL, last_ptr);
fa9c0d79 5121 spin_unlock(&last_ptr->refill_lock);
0a24325e 5122 goto loop;
fa9c0d79
CM
5123 }
5124
6226cb0a
JB
5125 offset = btrfs_find_space_for_alloc(block_group, search_start,
5126 num_bytes, empty_size);
1cdda9b8
JB
5127 /*
5128 * If we didn't find a chunk, and we haven't failed on this
5129 * block group before, and this block group is in the middle of
5130 * caching and we are ok with waiting, then go ahead and wait
5131 * for progress to be made, and set failed_alloc to true.
5132 *
5133 * If failed_alloc is true then we've already waited on this
5134 * block group once and should move on to the next block group.
5135 */
5136 if (!offset && !failed_alloc && !cached &&
5137 loop > LOOP_CACHING_NOWAIT) {
817d52f8 5138 wait_block_group_cache_progress(block_group,
1cdda9b8
JB
5139 num_bytes + empty_size);
5140 failed_alloc = true;
817d52f8 5141 goto have_block_group;
1cdda9b8
JB
5142 } else if (!offset) {
5143 goto loop;
817d52f8 5144 }
fa9c0d79 5145checks:
6226cb0a 5146 search_start = stripe_align(root, offset);
2552d17e 5147 /* move on to the next group */
6226cb0a
JB
5148 if (search_start + num_bytes >= search_end) {
5149 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 5150 goto loop;
6226cb0a 5151 }
25179201 5152
2552d17e
JB
5153 /* move on to the next group */
5154 if (search_start + num_bytes >
6226cb0a
JB
5155 block_group->key.objectid + block_group->key.offset) {
5156 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 5157 goto loop;
6226cb0a 5158 }
f5a31e16 5159
f0486c68
YZ
5160 ins->objectid = search_start;
5161 ins->offset = num_bytes;
2552d17e 5162
f0486c68
YZ
5163 if (offset < search_start)
5164 btrfs_add_free_space(block_group, offset,
5165 search_start - offset);
5166 BUG_ON(offset > search_start);
2552d17e 5167
f0486c68
YZ
5168 ret = update_reserved_bytes(block_group, num_bytes, 1,
5169 (data & BTRFS_BLOCK_GROUP_DATA));
5170 if (ret == -EAGAIN) {
6226cb0a 5171 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 5172 goto loop;
0f9dd46c 5173 }
0b86a832 5174
f0486c68 5175 /* we are all good, lets return */
2552d17e
JB
5176 ins->objectid = search_start;
5177 ins->offset = num_bytes;
d2fb3437 5178
6226cb0a
JB
5179 if (offset < search_start)
5180 btrfs_add_free_space(block_group, offset,
5181 search_start - offset);
5182 BUG_ON(offset > search_start);
2552d17e
JB
5183 break;
5184loop:
0a24325e 5185 failed_cluster_refill = false;
1cdda9b8 5186 failed_alloc = false;
b742bb82 5187 BUG_ON(index != get_block_group_index(block_group));
fa9c0d79 5188 btrfs_put_block_group(block_group);
2552d17e
JB
5189 }
5190 up_read(&space_info->groups_sem);
5191
b742bb82
YZ
5192 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5193 goto search;
5194
ccf0e725
JB
5195 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5196 * for them to make caching progress. Also
5197 * determine the best possible bg to cache
5198 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5199 * caching kthreads as we move along
817d52f8
JB
5200 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5201 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5202 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5203 * again
fa9c0d79 5204 */
817d52f8
JB
5205 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5206 (found_uncached_bg || empty_size || empty_cluster ||
5207 allowed_chunk_alloc)) {
b742bb82 5208 index = 0;
ccf0e725 5209 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
817d52f8 5210 found_uncached_bg = false;
ccf0e725
JB
5211 loop++;
5212 if (!ideal_cache_percent &&
5213 atomic_read(&space_info->caching_threads))
817d52f8 5214 goto search;
ccf0e725
JB
5215
5216 /*
5217 * 1 of the following 2 things has happened so far
5218 *
5219 * 1) We found an ideal block group for caching that
5220 * is mostly full and will cache quickly, so we might
5221 * as well wait for it.
5222 *
5223 * 2) We searched for cached only and we didn't find
5224 * anything, and we didn't start any caching kthreads
5225 * either, so chances are we will loop through and
5226 * start a couple caching kthreads, and then come back
5227 * around and just wait for them. This will be slower
5228 * because we will have 2 caching kthreads reading at
5229 * the same time when we could have just started one
5230 * and waited for it to get far enough to give us an
5231 * allocation, so go ahead and go to the wait caching
5232 * loop.
5233 */
5234 loop = LOOP_CACHING_WAIT;
5235 search_start = ideal_cache_offset;
5236 ideal_cache_percent = 0;
5237 goto ideal_cache;
5238 } else if (loop == LOOP_FIND_IDEAL) {
5239 /*
5240 * Didn't find an uncached bg, wait on anything we find
5241 * next.
5242 */
5243 loop = LOOP_CACHING_WAIT;
5244 goto search;
5245 }
5246
5247 if (loop < LOOP_CACHING_WAIT) {
5248 loop++;
5249 goto search;
817d52f8
JB
5250 }
5251
5252 if (loop == LOOP_ALLOC_CHUNK) {
fa9c0d79
CM
5253 empty_size = 0;
5254 empty_cluster = 0;
5255 }
2552d17e
JB
5256
5257 if (allowed_chunk_alloc) {
5258 ret = do_chunk_alloc(trans, root, num_bytes +
5259 2 * 1024 * 1024, data, 1);
2552d17e 5260 allowed_chunk_alloc = 0;
ccf0e725
JB
5261 done_chunk_alloc = 1;
5262 } else if (!done_chunk_alloc) {
2552d17e
JB
5263 space_info->force_alloc = 1;
5264 }
5265
817d52f8 5266 if (loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79 5267 loop++;
2552d17e 5268 goto search;
fa9c0d79 5269 }
2552d17e
JB
5270 ret = -ENOSPC;
5271 } else if (!ins->objectid) {
5272 ret = -ENOSPC;
f2654de4 5273 }
0b86a832 5274
80eb234a
JB
5275 /* we found what we needed */
5276 if (ins->objectid) {
5277 if (!(data & BTRFS_BLOCK_GROUP_DATA))
d2fb3437 5278 trans->block_group = block_group->key.objectid;
0f9dd46c 5279
fa9c0d79 5280 btrfs_put_block_group(block_group);
80eb234a 5281 ret = 0;
be744175 5282 }
be744175 5283
0f70abe2 5284 return ret;
fec577fb 5285}
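A minimal userspace sketch of the LOOP_* escalation above, assuming the stage names keep the meaning described in the comment block; try_alloc() is a hypothetical stand-in for one full pass over the block groups, not kernel code:

#include <stdbool.h>
#include <stdio.h>

enum loop_stage {
	LOOP_FIND_IDEAL,	/* cached bg's only, pick the best bg to cache */
	LOOP_CACHING_NOWAIT,	/* partially cached bg's, kick caching threads */
	LOOP_CACHING_WAIT,	/* wait for caching progress */
	LOOP_ALLOC_CHUNK,	/* force a chunk allocation and retry */
	LOOP_NO_EMPTY_SIZE,	/* last try with empty_size/empty_cluster = 0 */
};

/* stand-in: pretend every pass fails so the escalation is visible */
static bool try_alloc(int stage, unsigned long long *empty_size,
		      unsigned long long *empty_cluster)
{
	if (stage == LOOP_ALLOC_CHUNK) {
		*empty_size = 0;	/* mirrors lines 5253-5254 above */
		*empty_cluster = 0;
	}
	return false;
}

int main(void)
{
	unsigned long long empty_size = 4096, empty_cluster = 65536;
	int stage;

	for (stage = LOOP_FIND_IDEAL; stage <= LOOP_NO_EMPTY_SIZE; stage++) {
		printf("stage %d, empty_size=%llu\n", stage, empty_size);
		if (try_alloc(stage, &empty_size, &empty_cluster))
			return 0;
	}
	return 1;	/* the real code returns -ENOSPC here */
}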
ec44a35c 5286
9ed74f2d
JB
5287static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5288 int dump_block_groups)
0f9dd46c
JB
5289{
5290 struct btrfs_block_group_cache *cache;
b742bb82 5291 int index = 0;
0f9dd46c 5292
9ed74f2d 5293 spin_lock(&info->lock);
d397712b
CM
5294 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5295 (unsigned long long)(info->total_bytes - info->bytes_used -
9ed74f2d 5296 info->bytes_pinned - info->bytes_reserved -
8929ecfa 5297 info->bytes_readonly),
d397712b 5298 (info->full) ? "" : "not ");
8929ecfa
YZ
5299 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5300 "reserved=%llu, may_use=%llu, readonly=%llu\n",
21380931 5301 (unsigned long long)info->total_bytes,
8929ecfa 5302 (unsigned long long)info->bytes_used,
21380931 5303 (unsigned long long)info->bytes_pinned,
8929ecfa 5304 (unsigned long long)info->bytes_reserved,
21380931 5305 (unsigned long long)info->bytes_may_use,
8929ecfa 5306 (unsigned long long)info->bytes_readonly);
9ed74f2d
JB
5307 spin_unlock(&info->lock);
5308
5309 if (!dump_block_groups)
5310 return;
0f9dd46c 5311
80eb234a 5312 down_read(&info->groups_sem);
b742bb82
YZ
5313again:
5314 list_for_each_entry(cache, &info->block_groups[index], list) {
0f9dd46c 5315 spin_lock(&cache->lock);
d397712b
CM
5316 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5317 "%llu pinned %llu reserved\n",
5318 (unsigned long long)cache->key.objectid,
5319 (unsigned long long)cache->key.offset,
5320 (unsigned long long)btrfs_block_group_used(&cache->item),
5321 (unsigned long long)cache->pinned,
5322 (unsigned long long)cache->reserved);
0f9dd46c
JB
5323 btrfs_dump_free_space(cache, bytes);
5324 spin_unlock(&cache->lock);
5325 }
b742bb82
YZ
5326 if (++index < BTRFS_NR_RAID_TYPES)
5327 goto again;
80eb234a 5328 up_read(&info->groups_sem);
0f9dd46c 5329}
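dump_space_info derives "free" by subtracting every committed counter from total_bytes. The same arithmetic as a self-contained sketch; the field names follow the printk above and the numbers are made up:

#include <stdio.h>

struct space_counters {
	unsigned long long total_bytes, bytes_used, bytes_pinned,
			   bytes_reserved, bytes_readonly;
};

/* mirrors the subtraction in dump_space_info's first printk */
static unsigned long long free_bytes(const struct space_counters *s)
{
	return s->total_bytes - s->bytes_used - s->bytes_pinned -
	       s->bytes_reserved - s->bytes_readonly;
}

int main(void)
{
	struct space_counters s = {
		.total_bytes = 8ULL << 30, .bytes_used = 5ULL << 30,
		.bytes_pinned = 1ULL << 20, .bytes_reserved = 2ULL << 20,
		.bytes_readonly = 0,
	};
	printf("space_info has %llu free\n", free_bytes(&s));
	return 0;
}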
e8569813 5330
11833d66
YZ
5331int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5332 struct btrfs_root *root,
5333 u64 num_bytes, u64 min_alloc_size,
5334 u64 empty_size, u64 hint_byte,
5335 u64 search_end, struct btrfs_key *ins,
5336 u64 data)
fec577fb
CM
5337{
5338 int ret;
fbdc762b 5339 u64 search_start = 0;
925baedd 5340
6a63209f 5341 data = btrfs_get_alloc_profile(root, data);
98d20f67 5342again:
0ef3e66b
CM
5343 /*
5344 * the only place that sets empty_size is btrfs_realloc_node, which
5345 * is not called recursively on allocations
5346 */
83d3c969 5347 if (empty_size || root->ref_cows)
6324fbf3 5348 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
0ef3e66b 5349 num_bytes + 2 * 1024 * 1024, data, 0);
0b86a832 5350
db94535d
CM
5351 WARN_ON(num_bytes < root->sectorsize);
5352 ret = find_free_extent(trans, root, num_bytes, empty_size,
f0486c68
YZ
5353 search_start, search_end, hint_byte,
5354 ins, data);
3b951516 5355
98d20f67
CM
5356 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5357 num_bytes = num_bytes >> 1;
0f9dd46c 5358 num_bytes = num_bytes & ~(root->sectorsize - 1);
98d20f67 5359 num_bytes = max(num_bytes, min_alloc_size);
0ef3e66b
CM
5360 do_chunk_alloc(trans, root->fs_info->extent_root,
5361 num_bytes, data, 1);
98d20f67
CM
5362 goto again;
5363 }
817d52f8 5364 if (ret == -ENOSPC) {
0f9dd46c
JB
5365 struct btrfs_space_info *sinfo;
5366
5367 sinfo = __find_space_info(root->fs_info, data);
d397712b
CM
5368 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5369 "wanted %llu\n", (unsigned long long)data,
5370 (unsigned long long)num_bytes);
9ed74f2d 5371 dump_space_info(sinfo, num_bytes, 1);
925baedd 5372 }
0f9dd46c
JB
5373
5374 return ret;
e6dcd2dc
CM
5375}
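On -ENOSPC the function retries with a smaller request: halve num_bytes, round down to sector alignment, and never go below min_alloc_size. Here is that back-off in isolation, assuming sectorsize is a power of two as it is in btrfs:

#include <stdio.h>

static unsigned long long shrink_request(unsigned long long num_bytes,
					 unsigned long long min_alloc_size,
					 unsigned long long sectorsize)
{
	num_bytes >>= 1;
	num_bytes &= ~(sectorsize - 1);	/* power-of-two alignment assumed */
	if (num_bytes < min_alloc_size)
		num_bytes = min_alloc_size;
	return num_bytes;
}

int main(void)
{
	unsigned long long req = 1ULL << 20;	/* start with a 1MiB ask */
	unsigned long long min = 4096, sector = 4096;

	/* pretend every attempt fails, as if the fs were badly fragmented */
	while (1) {
		printf("trying %llu bytes\n", req);
		if (req == min)
			break;
		req = shrink_request(req, min, sector);
	}
	return 0;
}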
5376
65b51a00
CM
5377int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5378{
0f9dd46c 5379 struct btrfs_block_group_cache *cache;
1f3c79a2 5380 int ret = 0;
0f9dd46c 5381
0f9dd46c
JB
5382 cache = btrfs_lookup_block_group(root->fs_info, start);
5383 if (!cache) {
d397712b
CM
5384 printk(KERN_ERR "Unable to find block group for %llu\n",
5385 (unsigned long long)start);
0f9dd46c
JB
5386 return -ENOSPC;
5387 }
1f3c79a2
LH
5388
5389 ret = btrfs_discard_extent(root, start, len);
5390
0f9dd46c 5391 btrfs_add_free_space(cache, start, len);
f0486c68 5392 update_reserved_bytes(cache, len, 0, 1);
fa9c0d79 5393 btrfs_put_block_group(cache);
817d52f8 5394
e6dcd2dc
CM
5395 return ret;
5396}
5397
5d4f98a2
YZ
5398static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5399 struct btrfs_root *root,
5400 u64 parent, u64 root_objectid,
5401 u64 flags, u64 owner, u64 offset,
5402 struct btrfs_key *ins, int ref_mod)
e6dcd2dc
CM
5403{
5404 int ret;
5d4f98a2 5405 struct btrfs_fs_info *fs_info = root->fs_info;
e6dcd2dc 5406 struct btrfs_extent_item *extent_item;
5d4f98a2 5407 struct btrfs_extent_inline_ref *iref;
e6dcd2dc 5408 struct btrfs_path *path;
5d4f98a2
YZ
5409 struct extent_buffer *leaf;
5410 int type;
5411 u32 size;
26b8003f 5412
5d4f98a2
YZ
5413 if (parent > 0)
5414 type = BTRFS_SHARED_DATA_REF_KEY;
5415 else
5416 type = BTRFS_EXTENT_DATA_REF_KEY;
58176a96 5417
5d4f98a2 5418 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7bb86316
CM
5419
5420 path = btrfs_alloc_path();
5421 BUG_ON(!path);
47e4bb98 5422
b9473439 5423 path->leave_spinning = 1;
5d4f98a2
YZ
5424 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5425 ins, size);
ccd467d6 5426 BUG_ON(ret);
0f9dd46c 5427
5d4f98a2
YZ
5428 leaf = path->nodes[0];
5429 extent_item = btrfs_item_ptr(leaf, path->slots[0],
47e4bb98 5430 struct btrfs_extent_item);
5d4f98a2
YZ
5431 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5432 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5433 btrfs_set_extent_flags(leaf, extent_item,
5434 flags | BTRFS_EXTENT_FLAG_DATA);
5435
5436 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5437 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5438 if (parent > 0) {
5439 struct btrfs_shared_data_ref *ref;
5440 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5441 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5442 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5443 } else {
5444 struct btrfs_extent_data_ref *ref;
5445 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5446 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5447 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5448 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5449 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5450 }
47e4bb98
CM
5451
5452 btrfs_mark_buffer_dirty(path->nodes[0]);
7bb86316 5453 btrfs_free_path(path);
f510cfec 5454
f0486c68 5455 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
f5947066 5456 if (ret) {
d397712b
CM
5457 printk(KERN_ERR "btrfs update block group failed for %llu "
5458 "%llu\n", (unsigned long long)ins->objectid,
5459 (unsigned long long)ins->offset);
f5947066
CM
5460 BUG();
5461 }
e6dcd2dc
CM
5462 return ret;
5463}
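The item written here is a btrfs_extent_item followed directly by one inline reference, so the tree item size is sizeof(extent item) plus a type-dependent ref size. A sketch of that arithmetic with illustrative byte counts; the authoritative layouts live in ctree.h:

#include <stdio.h>

enum { SHARED_DATA_REF_KEY, EXTENT_DATA_REF_KEY };

/* illustrative sizes: u8 ref type tag, then the ref payload */
static size_t inline_ref_size(int type)
{
	switch (type) {
	case SHARED_DATA_REF_KEY:	/* type + parent(u64) + count(u32) */
		return 1 + 8 + 4;
	case EXTENT_DATA_REF_KEY:	/* type + root/objectid/offset + count */
		return 1 + 8 + 8 + 8 + 4;
	}
	return 0;
}

int main(void)
{
	size_t extent_item = 8 + 8 + 8;	/* refs, generation, flags */

	printf("shared ref item: %zu bytes\n",
	       extent_item + inline_ref_size(SHARED_DATA_REF_KEY));
	printf("keyed ref item:  %zu bytes\n",
	       extent_item + inline_ref_size(EXTENT_DATA_REF_KEY));
	return 0;
}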
5464
5d4f98a2
YZ
5465static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5466 struct btrfs_root *root,
5467 u64 parent, u64 root_objectid,
5468 u64 flags, struct btrfs_disk_key *key,
5469 int level, struct btrfs_key *ins)
e6dcd2dc
CM
5470{
5471 int ret;
5d4f98a2
YZ
5472 struct btrfs_fs_info *fs_info = root->fs_info;
5473 struct btrfs_extent_item *extent_item;
5474 struct btrfs_tree_block_info *block_info;
5475 struct btrfs_extent_inline_ref *iref;
5476 struct btrfs_path *path;
5477 struct extent_buffer *leaf;
5478 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
1c2308f8 5479
5d4f98a2
YZ
5480 path = btrfs_alloc_path();
5481 BUG_ON(!path);
56bec294 5482
5d4f98a2
YZ
5483 path->leave_spinning = 1;
5484 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5485 ins, size);
56bec294 5486 BUG_ON(ret);
5d4f98a2
YZ
5487
5488 leaf = path->nodes[0];
5489 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5490 struct btrfs_extent_item);
5491 btrfs_set_extent_refs(leaf, extent_item, 1);
5492 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5493 btrfs_set_extent_flags(leaf, extent_item,
5494 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5495 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5496
5497 btrfs_set_tree_block_key(leaf, block_info, key);
5498 btrfs_set_tree_block_level(leaf, block_info, level);
5499
5500 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5501 if (parent > 0) {
5502 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5503 btrfs_set_extent_inline_ref_type(leaf, iref,
5504 BTRFS_SHARED_BLOCK_REF_KEY);
5505 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5506 } else {
5507 btrfs_set_extent_inline_ref_type(leaf, iref,
5508 BTRFS_TREE_BLOCK_REF_KEY);
5509 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5510 }
5511
5512 btrfs_mark_buffer_dirty(leaf);
5513 btrfs_free_path(path);
5514
f0486c68 5515 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5d4f98a2
YZ
5516 if (ret) {
5517 printk(KERN_ERR "btrfs update block group failed for %llu "
5518 "%llu\n", (unsigned long long)ins->objectid,
5519 (unsigned long long)ins->offset);
5520 BUG();
5521 }
5522 return ret;
5523}
5524
5525int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5526 struct btrfs_root *root,
5527 u64 root_objectid, u64 owner,
5528 u64 offset, struct btrfs_key *ins)
5529{
5530 int ret;
5531
5532 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5533
5534 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5535 0, root_objectid, owner, offset,
5536 BTRFS_ADD_DELAYED_EXTENT, NULL);
e6dcd2dc
CM
5537 return ret;
5538}
e02119d5
CM
5539
5540/*
5541 * this is used by the tree logging recovery code. It records that
5542 * an extent has been allocated and makes sure to clear the free
5543 * space cache bits as well
5544 */
5d4f98a2
YZ
5545int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5546 struct btrfs_root *root,
5547 u64 root_objectid, u64 owner, u64 offset,
5548 struct btrfs_key *ins)
e02119d5
CM
5549{
5550 int ret;
5551 struct btrfs_block_group_cache *block_group;
11833d66
YZ
5552 struct btrfs_caching_control *caching_ctl;
5553 u64 start = ins->objectid;
5554 u64 num_bytes = ins->offset;
e02119d5 5555
e02119d5 5556 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
b8399dee 5557 cache_block_group(block_group, trans, NULL, 0);
11833d66 5558 caching_ctl = get_caching_control(block_group);
e02119d5 5559
11833d66
YZ
5560 if (!caching_ctl) {
5561 BUG_ON(!block_group_cache_done(block_group));
5562 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5563 BUG_ON(ret);
5564 } else {
5565 mutex_lock(&caching_ctl->mutex);
5566
5567 if (start >= caching_ctl->progress) {
5568 ret = add_excluded_extent(root, start, num_bytes);
5569 BUG_ON(ret);
5570 } else if (start + num_bytes <= caching_ctl->progress) {
5571 ret = btrfs_remove_free_space(block_group,
5572 start, num_bytes);
5573 BUG_ON(ret);
5574 } else {
5575 num_bytes = caching_ctl->progress - start;
5576 ret = btrfs_remove_free_space(block_group,
5577 start, num_bytes);
5578 BUG_ON(ret);
5579
5580 start = caching_ctl->progress;
5581 num_bytes = ins->objectid + ins->offset -
5582 caching_ctl->progress;
5583 ret = add_excluded_extent(root, start, num_bytes);
5584 BUG_ON(ret);
5585 }
5586
5587 mutex_unlock(&caching_ctl->mutex);
5588 put_caching_control(caching_ctl);
5589 }
5590
f0486c68
YZ
5591 ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5592 BUG_ON(ret);
fa9c0d79 5593 btrfs_put_block_group(block_group);
5d4f98a2
YZ
5594 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5595 0, owner, offset, ins, 1);
e02119d5
CM
5596 return ret;
5597}
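The caching_ctl->progress checks split the logged extent into the part the caching thread has already scanned (which must leave the free-space cache) and the part it has not reached (which gets excluded instead). The three-way interval split on its own, with print statements standing in for btrfs_remove_free_space and add_excluded_extent:

#include <stdio.h>

typedef unsigned long long u64;

static void remove_free_space(u64 start, u64 len)
{
	printf("remove  [%llu, %llu)\n", start, start + len);
}

static void add_excluded(u64 start, u64 len)
{
	printf("exclude [%llu, %llu)\n", start, start + len);
}

static void split_on_progress(u64 start, u64 num_bytes, u64 progress)
{
	if (start >= progress) {
		add_excluded(start, num_bytes);		/* not scanned yet */
	} else if (start + num_bytes <= progress) {
		remove_free_space(start, num_bytes);	/* fully scanned */
	} else {
		remove_free_space(start, progress - start);
		add_excluded(progress, start + num_bytes - progress);
	}
}

int main(void)
{
	split_on_progress(100, 50, 120);	/* the straddling case */
	return 0;
}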
5598
65b51a00
CM
5599struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5600 struct btrfs_root *root,
4008c04a
CM
5601 u64 bytenr, u32 blocksize,
5602 int level)
65b51a00
CM
5603{
5604 struct extent_buffer *buf;
5605
5606 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5607 if (!buf)
5608 return ERR_PTR(-ENOMEM);
5609 btrfs_set_header_generation(buf, trans->transid);
4008c04a 5610 btrfs_set_buffer_lockdep_class(buf, level);
65b51a00
CM
5611 btrfs_tree_lock(buf);
5612 clean_tree_block(trans, root, buf);
b4ce94de
CM
5613
5614 btrfs_set_lock_blocking(buf);
65b51a00 5615 btrfs_set_buffer_uptodate(buf);
b4ce94de 5616
d0c803c4 5617 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8cef4e16
YZ
5618 /*
5619 * we allow two log transactions at a time, use different
5620 * EXTENT bits to differentiate dirty pages.
5621 */
5622 if (root->log_transid % 2 == 0)
5623 set_extent_dirty(&root->dirty_log_pages, buf->start,
5624 buf->start + buf->len - 1, GFP_NOFS);
5625 else
5626 set_extent_new(&root->dirty_log_pages, buf->start,
5627 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4
CM
5628 } else {
5629 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
65b51a00 5630 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4 5631 }
65b51a00 5632 trans->blocks_used++;
b4ce94de 5633 /* this returns a buffer locked for blocking */
65b51a00
CM
5634 return buf;
5635}
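For log trees the parity of log_transid picks which extent bit marks a buffer's pages, so two log transactions can run back to back without their dirty pages getting mixed up. The selection reduced to a sketch; the enum values are stand-ins for the real EXTENT_DIRTY/EXTENT_NEW bits:

#include <stdio.h>

enum { DIRTY_BIT, NEW_BIT };

/* even log transids use the dirty bit, odd ones the new bit */
static int log_page_bit(unsigned long long log_transid)
{
	return (log_transid % 2 == 0) ? DIRTY_BIT : NEW_BIT;
}

int main(void)
{
	unsigned long long t;

	for (t = 0; t < 4; t++)
		printf("log_transid %llu -> bit %d\n", t, log_page_bit(t));
	return 0;
}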
5636
f0486c68
YZ
5637static struct btrfs_block_rsv *
5638use_block_rsv(struct btrfs_trans_handle *trans,
5639 struct btrfs_root *root, u32 blocksize)
5640{
5641 struct btrfs_block_rsv *block_rsv;
5642 int ret;
5643
5644 block_rsv = get_block_rsv(trans, root);
5645
5646 if (block_rsv->size == 0) {
8bb8ab2e
JB
5647 ret = reserve_metadata_bytes(trans, root, block_rsv,
5648 blocksize, 0);
f0486c68
YZ
5649 if (ret)
5650 return ERR_PTR(ret);
5651 return block_rsv;
5652 }
5653
5654 ret = block_rsv_use_bytes(block_rsv, blocksize);
5655 if (!ret)
5656 return block_rsv;
5657
f0486c68
YZ
5658 return ERR_PTR(-ENOSPC);
5659}
5660
5661static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5662{
5663 block_rsv_add_bytes(block_rsv, blocksize, 0);
5664 block_rsv_release_bytes(block_rsv, NULL, 0);
5665}
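use_block_rsv either fills an empty reservation or consumes bytes from an existing one, and unuse_block_rsv hands the bytes back when the block allocation fails (as btrfs_alloc_free_block does below). The pairing as a toy model, with a plain counter standing in for the rsv accounting:

#include <stdio.h>

struct toy_rsv { unsigned long long reserved; };

static int rsv_use(struct toy_rsv *r, unsigned long long bytes)
{
	if (r->reserved < bytes)
		return -1;		/* -ENOSPC in the real code */
	r->reserved -= bytes;
	return 0;
}

static void rsv_unuse(struct toy_rsv *r, unsigned long long bytes)
{
	r->reserved += bytes;		/* undo on allocation failure */
}

int main(void)
{
	struct toy_rsv r = { 16384 };

	if (rsv_use(&r, 4096) == 0) {
		/* pretend the tree block allocation failed */
		rsv_unuse(&r, 4096);
	}
	printf("reserved: %llu\n", r.reserved);
	return 0;
}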
5666
fec577fb 5667/*
f0486c68
YZ
5668 * finds a free extent and does all the dirty work required for allocation
5669 * returns the key for the extent through ins, and a tree buffer for
5670 * the first block of the extent through buf.
5671 *
fec577fb
CM
5672 * returns the tree buffer or an ERR_PTR on failure.
5673 */
5f39d397 5674struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5d4f98a2
YZ
5675 struct btrfs_root *root, u32 blocksize,
5676 u64 parent, u64 root_objectid,
5677 struct btrfs_disk_key *key, int level,
5678 u64 hint, u64 empty_size)
fec577fb 5679{
e2fa7227 5680 struct btrfs_key ins;
f0486c68 5681 struct btrfs_block_rsv *block_rsv;
5f39d397 5682 struct extent_buffer *buf;
f0486c68
YZ
5683 u64 flags = 0;
5684 int ret;
5685
fec577fb 5686
f0486c68
YZ
5687 block_rsv = use_block_rsv(trans, root, blocksize);
5688 if (IS_ERR(block_rsv))
5689 return ERR_CAST(block_rsv);
5690
5691 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5692 empty_size, hint, (u64)-1, &ins, 0);
fec577fb 5693 if (ret) {
f0486c68 5694 unuse_block_rsv(block_rsv, blocksize);
54aa1f4d 5695 return ERR_PTR(ret);
fec577fb 5696 }
55c69072 5697
4008c04a
CM
5698 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5699 blocksize, level);
f0486c68
YZ
5700 BUG_ON(IS_ERR(buf));
5701
5702 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5703 if (parent == 0)
5704 parent = ins.objectid;
5705 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5706 } else
5707 BUG_ON(parent > 0);
5708
5709 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5710 struct btrfs_delayed_extent_op *extent_op;
5711 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5712 BUG_ON(!extent_op);
5713 if (key)
5714 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5715 else
5716 memset(&extent_op->key, 0, sizeof(extent_op->key));
5717 extent_op->flags_to_set = flags;
5718 extent_op->update_key = 1;
5719 extent_op->update_flags = 1;
5720 extent_op->is_data = 0;
5721
5722 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5723 ins.offset, parent, root_objectid,
5724 level, BTRFS_ADD_DELAYED_EXTENT,
5725 extent_op);
5726 BUG_ON(ret);
5727 }
fec577fb
CM
5728 return buf;
5729}
a28ec197 5730
2c47e605
YZ
5731struct walk_control {
5732 u64 refs[BTRFS_MAX_LEVEL];
5733 u64 flags[BTRFS_MAX_LEVEL];
5734 struct btrfs_key update_progress;
5735 int stage;
5736 int level;
5737 int shared_level;
5738 int update_ref;
5739 int keep_locks;
1c4850e2
YZ
5740 int reada_slot;
5741 int reada_count;
2c47e605
YZ
5742};
5743
5744#define DROP_REFERENCE 1
5745#define UPDATE_BACKREF 2
5746
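walk_control drives a two-stage walk: DROP_REFERENCE frees what it can, and hitting a shared block flips the walk into UPDATE_BACKREF until it climbs back above the shared level. A toy model of that stage flip; the real transitions happen in do_walk_down and walk_up_proc below:

#include <stdio.h>

#define DROP_REFERENCE 1
#define UPDATE_BACKREF 2

struct toy_wc { int stage; int shared_level; };

/* do_walk_down: a shared, updatable block starts UPDATE_BACKREF */
static void found_shared(struct toy_wc *wc, int level)
{
	wc->stage = UPDATE_BACKREF;
	wc->shared_level = level;
}

/* walk_up_proc: reaching shared_level again ends UPDATE_BACKREF */
static void climbed_to(struct toy_wc *wc, int level)
{
	if (wc->stage == UPDATE_BACKREF && level >= wc->shared_level) {
		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
	}
}

int main(void)
{
	struct toy_wc wc = { DROP_REFERENCE, -1 };

	found_shared(&wc, 2);
	printf("stage=%d shared_level=%d\n", wc.stage, wc.shared_level);
	climbed_to(&wc, 2);
	printf("stage=%d shared_level=%d\n", wc.stage, wc.shared_level);
	return 0;
}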
1c4850e2
YZ
5747static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5748 struct btrfs_root *root,
5749 struct walk_control *wc,
5750 struct btrfs_path *path)
6407bf6d 5751{
1c4850e2
YZ
5752 u64 bytenr;
5753 u64 generation;
5754 u64 refs;
94fcca9f 5755 u64 flags;
5d4f98a2 5756 u32 nritems;
1c4850e2
YZ
5757 u32 blocksize;
5758 struct btrfs_key key;
5759 struct extent_buffer *eb;
6407bf6d 5760 int ret;
1c4850e2
YZ
5761 int slot;
5762 int nread = 0;
6407bf6d 5763
1c4850e2
YZ
5764 if (path->slots[wc->level] < wc->reada_slot) {
5765 wc->reada_count = wc->reada_count * 2 / 3;
5766 wc->reada_count = max(wc->reada_count, 2);
5767 } else {
5768 wc->reada_count = wc->reada_count * 3 / 2;
5769 wc->reada_count = min_t(int, wc->reada_count,
5770 BTRFS_NODEPTRS_PER_BLOCK(root));
5771 }
7bb86316 5772
1c4850e2
YZ
5773 eb = path->nodes[wc->level];
5774 nritems = btrfs_header_nritems(eb);
5775 blocksize = btrfs_level_size(root, wc->level - 1);
bd56b302 5776
1c4850e2
YZ
5777 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5778 if (nread >= wc->reada_count)
5779 break;
bd56b302 5780
2dd3e67b 5781 cond_resched();
1c4850e2
YZ
5782 bytenr = btrfs_node_blockptr(eb, slot);
5783 generation = btrfs_node_ptr_generation(eb, slot);
2dd3e67b 5784
1c4850e2
YZ
5785 if (slot == path->slots[wc->level])
5786 goto reada;
5d4f98a2 5787
1c4850e2
YZ
5788 if (wc->stage == UPDATE_BACKREF &&
5789 generation <= root->root_key.offset)
bd56b302
CM
5790 continue;
5791
94fcca9f
YZ
5792 /* We don't lock the tree block, it's OK to be racy here */
5793 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5794 &refs, &flags);
5795 BUG_ON(ret);
5796 BUG_ON(refs == 0);
5797
1c4850e2 5798 if (wc->stage == DROP_REFERENCE) {
1c4850e2
YZ
5799 if (refs == 1)
5800 goto reada;
bd56b302 5801
94fcca9f
YZ
5802 if (wc->level == 1 &&
5803 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5804 continue;
1c4850e2
YZ
5805 if (!wc->update_ref ||
5806 generation <= root->root_key.offset)
5807 continue;
5808 btrfs_node_key_to_cpu(eb, &key, slot);
5809 ret = btrfs_comp_cpu_keys(&key,
5810 &wc->update_progress);
5811 if (ret < 0)
5812 continue;
94fcca9f
YZ
5813 } else {
5814 if (wc->level == 1 &&
5815 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5816 continue;
6407bf6d 5817 }
1c4850e2
YZ
5818reada:
5819 ret = readahead_tree_block(root, bytenr, blocksize,
5820 generation);
5821 if (ret)
bd56b302 5822 break;
1c4850e2 5823 nread++;
20524f02 5824 }
1c4850e2 5825 wc->reada_slot = slot;
20524f02 5826}
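The readahead window adapts to how the walk is doing: it shrinks to 2/3 (never below 2) when the walk is still behind the last window, and grows by 3/2 up to the node fan-out when it has caught up. The window update in isolation; 121 is an assumed BTRFS_NODEPTRS_PER_BLOCK for a 4K nodesize:

#include <stdio.h>

static int update_reada_count(int count, int slot, int reada_slot)
{
	if (slot < reada_slot) {
		count = count * 2 / 3;
		if (count < 2)
			count = 2;
	} else {
		count = count * 3 / 2;
		if (count > 121)
			count = 121;	/* assumed fan-out cap */
	}
	return count;
}

int main(void)
{
	int count = 32;

	count = update_reada_count(count, 5, 10);	/* behind: shrink */
	printf("shrunk to %d\n", count);
	count = update_reada_count(count, 10, 10);	/* caught up: grow */
	printf("grew to %d\n", count);
	return 0;
}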
2c47e605 5827
f82d02d9 5828/*
2c47e605
YZ
5829 * helper to process tree block while walking down the tree.
5830 *
2c47e605
YZ
5831 * when wc->stage == UPDATE_BACKREF, this function updates
5832 * back refs for pointers in the block.
5833 *
5834 * NOTE: return value 1 means we should stop walking down.
f82d02d9 5835 */
2c47e605 5836static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5d4f98a2 5837 struct btrfs_root *root,
2c47e605 5838 struct btrfs_path *path,
94fcca9f 5839 struct walk_control *wc, int lookup_info)
f82d02d9 5840{
2c47e605
YZ
5841 int level = wc->level;
5842 struct extent_buffer *eb = path->nodes[level];
2c47e605 5843 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
f82d02d9
YZ
5844 int ret;
5845
2c47e605
YZ
5846 if (wc->stage == UPDATE_BACKREF &&
5847 btrfs_header_owner(eb) != root->root_key.objectid)
5848 return 1;
f82d02d9 5849
2c47e605
YZ
5850 /*
5851 * when the reference count of a tree block is 1, it won't increase
5852 * again. once the full backref flag is set, we never clear it.
5853 */
94fcca9f
YZ
5854 if (lookup_info &&
5855 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5856 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
2c47e605
YZ
5857 BUG_ON(!path->locks[level]);
5858 ret = btrfs_lookup_extent_info(trans, root,
5859 eb->start, eb->len,
5860 &wc->refs[level],
5861 &wc->flags[level]);
5862 BUG_ON(ret);
5863 BUG_ON(wc->refs[level] == 0);
5864 }
5d4f98a2 5865
2c47e605
YZ
5866 if (wc->stage == DROP_REFERENCE) {
5867 if (wc->refs[level] > 1)
5868 return 1;
f82d02d9 5869
2c47e605
YZ
5870 if (path->locks[level] && !wc->keep_locks) {
5871 btrfs_tree_unlock(eb);
5872 path->locks[level] = 0;
5873 }
5874 return 0;
5875 }
f82d02d9 5876
2c47e605
YZ
5877 /* wc->stage == UPDATE_BACKREF */
5878 if (!(wc->flags[level] & flag)) {
5879 BUG_ON(!path->locks[level]);
5880 ret = btrfs_inc_ref(trans, root, eb, 1);
f82d02d9 5881 BUG_ON(ret);
2c47e605
YZ
5882 ret = btrfs_dec_ref(trans, root, eb, 0);
5883 BUG_ON(ret);
5884 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5885 eb->len, flag, 0);
5886 BUG_ON(ret);
5887 wc->flags[level] |= flag;
5888 }
5889
5890 /*
5891 * the block is shared by multiple trees, so it's not good to
5892 * keep the tree lock
5893 */
5894 if (path->locks[level] && level > 0) {
5895 btrfs_tree_unlock(eb);
5896 path->locks[level] = 0;
5897 }
5898 return 0;
5899}
5900
1c4850e2
YZ
5901/*
5902 * helper to process tree block pointer.
5903 *
5904 * when wc->stage == DROP_REFERENCE, this function checks
5905 * reference count of the block pointed to. if the block
5906 * is shared and we need update back refs for the subtree
5907 * rooted at the block, this function changes wc->stage to
5908 * UPDATE_BACKREF. if the block is shared and there is no
5910 * need to update back refs, this function drops the reference
5910 * to the block.
5911 *
5912 * NOTE: return value 1 means we should stop walking down.
5913 */
5914static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5915 struct btrfs_root *root,
5916 struct btrfs_path *path,
94fcca9f 5917 struct walk_control *wc, int *lookup_info)
1c4850e2
YZ
5918{
5919 u64 bytenr;
5920 u64 generation;
5921 u64 parent;
5922 u32 blocksize;
5923 struct btrfs_key key;
5924 struct extent_buffer *next;
5925 int level = wc->level;
5926 int reada = 0;
5927 int ret = 0;
5928
5929 generation = btrfs_node_ptr_generation(path->nodes[level],
5930 path->slots[level]);
5931 /*
5932 * if the lower level block was created before the snapshot
5933 * was created, we know there is no need to update back refs
5934 * for the subtree
5935 */
5936 if (wc->stage == UPDATE_BACKREF &&
94fcca9f
YZ
5937 generation <= root->root_key.offset) {
5938 *lookup_info = 1;
1c4850e2 5939 return 1;
94fcca9f 5940 }
1c4850e2
YZ
5941
5942 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5943 blocksize = btrfs_level_size(root, level - 1);
5944
5945 next = btrfs_find_tree_block(root, bytenr, blocksize);
5946 if (!next) {
5947 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
90d2c51d
MX
5948 if (!next)
5949 return -ENOMEM;
1c4850e2
YZ
5950 reada = 1;
5951 }
5952 btrfs_tree_lock(next);
5953 btrfs_set_lock_blocking(next);
5954
94fcca9f
YZ
5955 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5956 &wc->refs[level - 1],
5957 &wc->flags[level - 1]);
5958 BUG_ON(ret);
5959 BUG_ON(wc->refs[level - 1] == 0);
5960 *lookup_info = 0;
1c4850e2 5961
94fcca9f 5962 if (wc->stage == DROP_REFERENCE) {
1c4850e2 5963 if (wc->refs[level - 1] > 1) {
94fcca9f
YZ
5964 if (level == 1 &&
5965 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5966 goto skip;
5967
1c4850e2
YZ
5968 if (!wc->update_ref ||
5969 generation <= root->root_key.offset)
5970 goto skip;
5971
5972 btrfs_node_key_to_cpu(path->nodes[level], &key,
5973 path->slots[level]);
5974 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5975 if (ret < 0)
5976 goto skip;
5977
5978 wc->stage = UPDATE_BACKREF;
5979 wc->shared_level = level - 1;
5980 }
94fcca9f
YZ
5981 } else {
5982 if (level == 1 &&
5983 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5984 goto skip;
1c4850e2
YZ
5985 }
5986
5987 if (!btrfs_buffer_uptodate(next, generation)) {
5988 btrfs_tree_unlock(next);
5989 free_extent_buffer(next);
5990 next = NULL;
94fcca9f 5991 *lookup_info = 1;
1c4850e2
YZ
5992 }
5993
5994 if (!next) {
5995 if (reada && level == 1)
5996 reada_walk_down(trans, root, wc, path);
5997 next = read_tree_block(root, bytenr, blocksize, generation);
5998 btrfs_tree_lock(next);
5999 btrfs_set_lock_blocking(next);
6000 }
6001
6002 level--;
6003 BUG_ON(level != btrfs_header_level(next));
6004 path->nodes[level] = next;
6005 path->slots[level] = 0;
6006 path->locks[level] = 1;
6007 wc->level = level;
6008 if (wc->level == 1)
6009 wc->reada_slot = 0;
6010 return 0;
6011skip:
6012 wc->refs[level - 1] = 0;
6013 wc->flags[level - 1] = 0;
94fcca9f
YZ
6014 if (wc->stage == DROP_REFERENCE) {
6015 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6016 parent = path->nodes[level]->start;
6017 } else {
6018 BUG_ON(root->root_key.objectid !=
6019 btrfs_header_owner(path->nodes[level]));
6020 parent = 0;
6021 }
1c4850e2 6022
94fcca9f
YZ
6023 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6024 root->root_key.objectid, level - 1, 0);
6025 BUG_ON(ret);
1c4850e2 6026 }
1c4850e2
YZ
6027 btrfs_tree_unlock(next);
6028 free_extent_buffer(next);
94fcca9f 6029 *lookup_info = 1;
1c4850e2
YZ
6030 return 1;
6031}
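The heart of do_walk_down is the decision for a shared block: descend when we are the sole owner, drop one reference and skip the subtree when no backref update is needed, or switch to UPDATE_BACKREF otherwise. A simplified predicate for the skip path; it omits the level-1 full backref shortcut and the update_progress key comparison:

#include <stdbool.h>
#include <stdio.h>

static bool can_skip_subtree(unsigned long long refs, bool update_ref,
			     unsigned long long generation,
			     unsigned long long snapshot_gen)
{
	if (refs == 1)
		return false;	/* sole owner: keep walking down */
	if (!update_ref || generation <= snapshot_gen)
		return true;	/* just drop our reference and move on */
	return false;		/* shared and newer: go to UPDATE_BACKREF */
}

int main(void)
{
	printf("%d\n", can_skip_subtree(2, false, 50, 40));	/* 1: skip */
	printf("%d\n", can_skip_subtree(2, true, 50, 40));	/* 0: update */
	printf("%d\n", can_skip_subtree(1, true, 50, 40));	/* 0: descend */
	return 0;
}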
6032
2c47e605
YZ
6033/*
6034 * helper to process tree block while walking up the tree.
6035 *
6036 * when wc->stage == DROP_REFERENCE, this function drops
6037 * reference count on the block.
6038 *
6039 * when wc->stage == UPDATE_BACKREF, this function changes
6040 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6041 * to UPDATE_BACKREF previously while processing the block.
6042 *
6043 * NOTE: return value 1 means we should stop walking up.
6044 */
6045static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6046 struct btrfs_root *root,
6047 struct btrfs_path *path,
6048 struct walk_control *wc)
6049{
f0486c68 6050 int ret;
2c47e605
YZ
6051 int level = wc->level;
6052 struct extent_buffer *eb = path->nodes[level];
6053 u64 parent = 0;
6054
6055 if (wc->stage == UPDATE_BACKREF) {
6056 BUG_ON(wc->shared_level < level);
6057 if (level < wc->shared_level)
6058 goto out;
6059
2c47e605
YZ
6060 ret = find_next_key(path, level + 1, &wc->update_progress);
6061 if (ret > 0)
6062 wc->update_ref = 0;
6063
6064 wc->stage = DROP_REFERENCE;
6065 wc->shared_level = -1;
6066 path->slots[level] = 0;
6067
6068 /*
6069 * check reference count again if the block isn't locked.
6070 * we should start walking down the tree again if reference
6071 * count is one.
6072 */
6073 if (!path->locks[level]) {
6074 BUG_ON(level == 0);
6075 btrfs_tree_lock(eb);
6076 btrfs_set_lock_blocking(eb);
6077 path->locks[level] = 1;
6078
6079 ret = btrfs_lookup_extent_info(trans, root,
6080 eb->start, eb->len,
6081 &wc->refs[level],
6082 &wc->flags[level]);
f82d02d9 6083 BUG_ON(ret);
2c47e605
YZ
6084 BUG_ON(wc->refs[level] == 0);
6085 if (wc->refs[level] == 1) {
6086 btrfs_tree_unlock(eb);
6087 path->locks[level] = 0;
6088 return 1;
6089 }
f82d02d9 6090 }
2c47e605 6091 }
f82d02d9 6092
2c47e605
YZ
6093 /* wc->stage == DROP_REFERENCE */
6094 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5d4f98a2 6095
2c47e605
YZ
6096 if (wc->refs[level] == 1) {
6097 if (level == 0) {
6098 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6099 ret = btrfs_dec_ref(trans, root, eb, 1);
6100 else
6101 ret = btrfs_dec_ref(trans, root, eb, 0);
6102 BUG_ON(ret);
6103 }
6104 /* make block locked assertion in clean_tree_block happy */
6105 if (!path->locks[level] &&
6106 btrfs_header_generation(eb) == trans->transid) {
6107 btrfs_tree_lock(eb);
6108 btrfs_set_lock_blocking(eb);
6109 path->locks[level] = 1;
6110 }
6111 clean_tree_block(trans, root, eb);
6112 }
6113
6114 if (eb == root->node) {
6115 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6116 parent = eb->start;
6117 else
6118 BUG_ON(root->root_key.objectid !=
6119 btrfs_header_owner(eb));
6120 } else {
6121 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6122 parent = path->nodes[level + 1]->start;
6123 else
6124 BUG_ON(root->root_key.objectid !=
6125 btrfs_header_owner(path->nodes[level + 1]));
f82d02d9 6126 }
f82d02d9 6127
f0486c68 6128 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
2c47e605
YZ
6129out:
6130 wc->refs[level] = 0;
6131 wc->flags[level] = 0;
f0486c68 6132 return 0;
2c47e605
YZ
6133}
6134
6135static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6136 struct btrfs_root *root,
6137 struct btrfs_path *path,
6138 struct walk_control *wc)
6139{
2c47e605 6140 int level = wc->level;
94fcca9f 6141 int lookup_info = 1;
2c47e605
YZ
6142 int ret;
6143
6144 while (level >= 0) {
94fcca9f 6145 ret = walk_down_proc(trans, root, path, wc, lookup_info);
2c47e605
YZ
6146 if (ret > 0)
6147 break;
6148
6149 if (level == 0)
6150 break;
6151
7a7965f8
YZ
6152 if (path->slots[level] >=
6153 btrfs_header_nritems(path->nodes[level]))
6154 break;
6155
94fcca9f 6156 ret = do_walk_down(trans, root, path, wc, &lookup_info);
1c4850e2
YZ
6157 if (ret > 0) {
6158 path->slots[level]++;
6159 continue;
90d2c51d
MX
6160 } else if (ret < 0)
6161 return ret;
1c4850e2 6162 level = wc->level;
f82d02d9 6163 }
f82d02d9
YZ
6164 return 0;
6165}
6166
d397712b 6167static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
98ed5174 6168 struct btrfs_root *root,
f82d02d9 6169 struct btrfs_path *path,
2c47e605 6170 struct walk_control *wc, int max_level)
20524f02 6171{
2c47e605 6172 int level = wc->level;
20524f02 6173 int ret;
9f3a7427 6174
2c47e605
YZ
6175 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6176 while (level < max_level && path->nodes[level]) {
6177 wc->level = level;
6178 if (path->slots[level] + 1 <
6179 btrfs_header_nritems(path->nodes[level])) {
6180 path->slots[level]++;
20524f02
CM
6181 return 0;
6182 } else {
2c47e605
YZ
6183 ret = walk_up_proc(trans, root, path, wc);
6184 if (ret > 0)
6185 return 0;
bd56b302 6186
2c47e605
YZ
6187 if (path->locks[level]) {
6188 btrfs_tree_unlock(path->nodes[level]);
6189 path->locks[level] = 0;
f82d02d9 6190 }
2c47e605
YZ
6191 free_extent_buffer(path->nodes[level]);
6192 path->nodes[level] = NULL;
6193 level++;
20524f02
CM
6194 }
6195 }
6196 return 1;
6197}
6198
9aca1d51 6199/*
2c47e605
YZ
6200 * drop a subvolume tree.
6201 *
6202 * this function traverses the tree freeing any blocks that are only
6203 * referenced by the tree.
6204 *
6205 * when a shared tree block is found, this function decreases its
6206 * reference count by one. if update_ref is true, this function
6207 * also makes sure backrefs for the shared block and all lower level
6208 * blocks are properly updated.
9aca1d51 6209 */
3fd0a558
YZ
6210int btrfs_drop_snapshot(struct btrfs_root *root,
6211 struct btrfs_block_rsv *block_rsv, int update_ref)
20524f02 6212{
5caf2a00 6213 struct btrfs_path *path;
2c47e605
YZ
6214 struct btrfs_trans_handle *trans;
6215 struct btrfs_root *tree_root = root->fs_info->tree_root;
9f3a7427 6216 struct btrfs_root_item *root_item = &root->root_item;
2c47e605
YZ
6217 struct walk_control *wc;
6218 struct btrfs_key key;
6219 int err = 0;
6220 int ret;
6221 int level;
20524f02 6222
5caf2a00
CM
6223 path = btrfs_alloc_path();
6224 BUG_ON(!path);
20524f02 6225
2c47e605
YZ
6226 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6227 BUG_ON(!wc);
6228
a22285a6 6229 trans = btrfs_start_transaction(tree_root, 0);
3fd0a558
YZ
6230 if (block_rsv)
6231 trans->block_rsv = block_rsv;
2c47e605 6232
9f3a7427 6233 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2c47e605 6234 level = btrfs_header_level(root->node);
5d4f98a2
YZ
6235 path->nodes[level] = btrfs_lock_root_node(root);
6236 btrfs_set_lock_blocking(path->nodes[level]);
9f3a7427 6237 path->slots[level] = 0;
5d4f98a2 6238 path->locks[level] = 1;
2c47e605
YZ
6239 memset(&wc->update_progress, 0,
6240 sizeof(wc->update_progress));
9f3a7427 6241 } else {
9f3a7427 6242 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2c47e605
YZ
6243 memcpy(&wc->update_progress, &key,
6244 sizeof(wc->update_progress));
6245
6702ed49 6246 level = root_item->drop_level;
2c47e605 6247 BUG_ON(level == 0);
6702ed49 6248 path->lowest_level = level;
2c47e605
YZ
6249 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6250 path->lowest_level = 0;
6251 if (ret < 0) {
6252 err = ret;
9f3a7427
CM
6253 goto out;
6254 }
1c4850e2 6255 WARN_ON(ret > 0);
2c47e605 6256
7d9eb12c
CM
6257 /*
6258 * unlock our path, this is safe because only this
6259 * function is allowed to delete this snapshot
6260 */
5d4f98a2 6261 btrfs_unlock_up_safe(path, 0);
2c47e605
YZ
6262
6263 level = btrfs_header_level(root->node);
6264 while (1) {
6265 btrfs_tree_lock(path->nodes[level]);
6266 btrfs_set_lock_blocking(path->nodes[level]);
6267
6268 ret = btrfs_lookup_extent_info(trans, root,
6269 path->nodes[level]->start,
6270 path->nodes[level]->len,
6271 &wc->refs[level],
6272 &wc->flags[level]);
6273 BUG_ON(ret);
6274 BUG_ON(wc->refs[level] == 0);
6275
6276 if (level == root_item->drop_level)
6277 break;
6278
6279 btrfs_tree_unlock(path->nodes[level]);
6280 WARN_ON(wc->refs[level] != 1);
6281 level--;
6282 }
9f3a7427 6283 }
2c47e605
YZ
6284
6285 wc->level = level;
6286 wc->shared_level = -1;
6287 wc->stage = DROP_REFERENCE;
6288 wc->update_ref = update_ref;
6289 wc->keep_locks = 0;
1c4850e2 6290 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
2c47e605 6291
d397712b 6292 while (1) {
2c47e605
YZ
6293 ret = walk_down_tree(trans, root, path, wc);
6294 if (ret < 0) {
6295 err = ret;
20524f02 6296 break;
2c47e605 6297 }
9aca1d51 6298
2c47e605
YZ
6299 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6300 if (ret < 0) {
6301 err = ret;
20524f02 6302 break;
2c47e605
YZ
6303 }
6304
6305 if (ret > 0) {
6306 BUG_ON(wc->stage != DROP_REFERENCE);
e7a84565
CM
6307 break;
6308 }
2c47e605
YZ
6309
6310 if (wc->stage == DROP_REFERENCE) {
6311 level = wc->level;
6312 btrfs_node_key(path->nodes[level],
6313 &root_item->drop_progress,
6314 path->slots[level]);
6315 root_item->drop_level = level;
6316 }
6317
6318 BUG_ON(wc->level == 0);
3fd0a558 6319 if (btrfs_should_end_transaction(trans, tree_root)) {
2c47e605
YZ
6320 ret = btrfs_update_root(trans, tree_root,
6321 &root->root_key,
6322 root_item);
6323 BUG_ON(ret);
6324
3fd0a558 6325 btrfs_end_transaction_throttle(trans, tree_root);
a22285a6 6326 trans = btrfs_start_transaction(tree_root, 0);
3fd0a558
YZ
6327 if (block_rsv)
6328 trans->block_rsv = block_rsv;
c3e69d58 6329 }
20524f02 6330 }
2c47e605
YZ
6331 btrfs_release_path(root, path);
6332 BUG_ON(err);
6333
6334 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6335 BUG_ON(ret);
6336
76dda93c
YZ
6337 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6338 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6339 NULL, NULL);
6340 BUG_ON(ret < 0);
6341 if (ret > 0) {
84cd948c
JB
6342 /* if we fail to delete the orphan item this time
6343 * around, it'll get picked up the next time.
6344 *
6345 * The most common failure here is just -ENOENT.
6346 */
6347 btrfs_del_orphan_item(trans, tree_root,
6348 root->root_key.objectid);
76dda93c
YZ
6349 }
6350 }
6351
6352 if (root->in_radix) {
6353 btrfs_free_fs_root(tree_root->fs_info, root);
6354 } else {
6355 free_extent_buffer(root->node);
6356 free_extent_buffer(root->commit_root);
6357 kfree(root);
6358 }
9f3a7427 6359out:
3fd0a558 6360 btrfs_end_transaction_throttle(trans, tree_root);
2c47e605 6361 kfree(wc);
5caf2a00 6362 btrfs_free_path(path);
2c47e605 6363 return err;
20524f02 6364}
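drop_progress and drop_level in the root item are what make the deletion restartable: before each transaction ends, the current node key and level are saved, and a later mount resumes the walk from that key with btrfs_search_slot. The checkpoint idea reduced to a sketch:

#include <stdio.h>

typedef unsigned long long u64;

/* stands in for root_item->drop_progress / drop_level */
struct drop_checkpoint { u64 key_objectid; int level; };

static void save_progress(struct drop_checkpoint *cp, u64 key, int level)
{
	cp->key_objectid = key;		/* btrfs_node_key(...) above */
	cp->level = level;		/* root_item->drop_level */
}

static void resume(const struct drop_checkpoint *cp)
{
	if (cp->key_objectid == 0)
		printf("fresh drop: lock the root node and start\n");
	else
		printf("resume at key %llu, level %d\n",
		       cp->key_objectid, cp->level);
}

int main(void)
{
	struct drop_checkpoint cp = { 0, 0 };

	resume(&cp);
	save_progress(&cp, 4242, 1);	/* transaction about to end */
	resume(&cp);
	return 0;
}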
9078a3e1 6365
2c47e605
YZ
6366/*
6367 * drop subtree rooted at tree block 'node'.
6368 *
6369 * NOTE: this function will unlock and release tree block 'node'
6370 */
f82d02d9
YZ
6371int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6372 struct btrfs_root *root,
6373 struct extent_buffer *node,
6374 struct extent_buffer *parent)
6375{
6376 struct btrfs_path *path;
2c47e605 6377 struct walk_control *wc;
f82d02d9
YZ
6378 int level;
6379 int parent_level;
6380 int ret = 0;
6381 int wret;
6382
2c47e605
YZ
6383 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6384
f82d02d9
YZ
6385 path = btrfs_alloc_path();
6386 BUG_ON(!path);
6387
2c47e605
YZ
6388 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6389 BUG_ON(!wc);
6390
b9447ef8 6391 btrfs_assert_tree_locked(parent);
f82d02d9
YZ
6392 parent_level = btrfs_header_level(parent);
6393 extent_buffer_get(parent);
6394 path->nodes[parent_level] = parent;
6395 path->slots[parent_level] = btrfs_header_nritems(parent);
6396
b9447ef8 6397 btrfs_assert_tree_locked(node);
f82d02d9 6398 level = btrfs_header_level(node);
f82d02d9
YZ
6399 path->nodes[level] = node;
6400 path->slots[level] = 0;
2c47e605
YZ
6401 path->locks[level] = 1;
6402
6403 wc->refs[parent_level] = 1;
6404 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6405 wc->level = level;
6406 wc->shared_level = -1;
6407 wc->stage = DROP_REFERENCE;
6408 wc->update_ref = 0;
6409 wc->keep_locks = 1;
1c4850e2 6410 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
f82d02d9
YZ
6411
6412 while (1) {
2c47e605
YZ
6413 wret = walk_down_tree(trans, root, path, wc);
6414 if (wret < 0) {
f82d02d9 6415 ret = wret;
f82d02d9 6416 break;
2c47e605 6417 }
f82d02d9 6418
2c47e605 6419 wret = walk_up_tree(trans, root, path, wc, parent_level);
f82d02d9
YZ
6420 if (wret < 0)
6421 ret = wret;
6422 if (wret != 0)
6423 break;
6424 }
6425
2c47e605 6426 kfree(wc);
f82d02d9
YZ
6427 btrfs_free_path(path);
6428 return ret;
6429}
6430
5d4f98a2 6431#if 0
8e7bf94f
CM
6432static unsigned long calc_ra(unsigned long start, unsigned long last,
6433 unsigned long nr)
6434{
6435 return min(last, start + nr - 1);
6436}
6437
d397712b 6438static noinline int relocate_inode_pages(struct inode *inode, u64 start,
98ed5174 6439 u64 len)
edbd8d4e
CM
6440{
6441 u64 page_start;
6442 u64 page_end;
1a40e23b 6443 unsigned long first_index;
edbd8d4e 6444 unsigned long last_index;
edbd8d4e
CM
6445 unsigned long i;
6446 struct page *page;
d1310b2e 6447 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4313b399 6448 struct file_ra_state *ra;
3eaa2885 6449 struct btrfs_ordered_extent *ordered;
1a40e23b
ZY
6450 unsigned int total_read = 0;
6451 unsigned int total_dirty = 0;
6452 int ret = 0;
4313b399
CM
6453
6454 ra = kzalloc(sizeof(*ra), GFP_NOFS);
edbd8d4e
CM
6455
6456 mutex_lock(&inode->i_mutex);
1a40e23b 6457 first_index = start >> PAGE_CACHE_SHIFT;
edbd8d4e
CM
6458 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6459
1a40e23b
ZY
6460 /* make sure the dirty trick played by the caller works */
6461 ret = invalidate_inode_pages2_range(inode->i_mapping,
6462 first_index, last_index);
6463 if (ret)
6464 goto out_unlock;
8e7bf94f 6465
4313b399 6466 file_ra_state_init(ra, inode->i_mapping);
edbd8d4e 6467
1a40e23b
ZY
6468 for (i = first_index ; i <= last_index; i++) {
6469 if (total_read % ra->ra_pages == 0) {
8e7bf94f 6470 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
1a40e23b 6471 calc_ra(i, last_index, ra->ra_pages));
8e7bf94f
CM
6472 }
6473 total_read++;
3eaa2885
CM
6474again:
6475 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
1a40e23b 6476 BUG_ON(1);
edbd8d4e 6477 page = grab_cache_page(inode->i_mapping, i);
a061fc8d 6478 if (!page) {
1a40e23b 6479 ret = -ENOMEM;
edbd8d4e 6480 goto out_unlock;
a061fc8d 6481 }
edbd8d4e
CM
6482 if (!PageUptodate(page)) {
6483 btrfs_readpage(NULL, page);
6484 lock_page(page);
6485 if (!PageUptodate(page)) {
6486 unlock_page(page);
6487 page_cache_release(page);
1a40e23b 6488 ret = -EIO;
edbd8d4e
CM
6489 goto out_unlock;
6490 }
6491 }
ec44a35c 6492 wait_on_page_writeback(page);
3eaa2885 6493
edbd8d4e
CM
6494 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6495 page_end = page_start + PAGE_CACHE_SIZE - 1;
d1310b2e 6496 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e 6497
3eaa2885
CM
6498 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6499 if (ordered) {
6500 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6501 unlock_page(page);
6502 page_cache_release(page);
6503 btrfs_start_ordered_extent(inode, ordered, 1);
6504 btrfs_put_ordered_extent(ordered);
6505 goto again;
6506 }
6507 set_page_extent_mapped(page);
6508
1a40e23b
ZY
6509 if (i == first_index)
6510 set_extent_bits(io_tree, page_start, page_end,
6511 EXTENT_BOUNDARY, GFP_NOFS);
1f80e4db 6512 btrfs_set_extent_delalloc(inode, page_start, page_end);
1a40e23b 6513
a061fc8d 6514 set_page_dirty(page);
1a40e23b 6515 total_dirty++;
edbd8d4e 6516
d1310b2e 6517 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e
CM
6518 unlock_page(page);
6519 page_cache_release(page);
6520 }
6521
6522out_unlock:
ec44a35c 6523 kfree(ra);
edbd8d4e 6524 mutex_unlock(&inode->i_mutex);
1a40e23b
ZY
6525 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6526 return ret;
edbd8d4e
CM
6527}
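This (#if 0'd) helper works page by page, so its first step is converting the byte range into page-cache indices. That index arithmetic on its own, assuming 4K pages as PAGE_CACHE_SHIFT implies on common configs:

#include <stdio.h>

#define TOY_PAGE_SHIFT 12	/* assumed 4K page size */

int main(void)
{
	unsigned long long start = 5000, len = 10000;
	unsigned long first_index = start >> TOY_PAGE_SHIFT;
	unsigned long last_index = (start + len - 1) >> TOY_PAGE_SHIFT;

	printf("pages %lu..%lu (%lu pages)\n",
	       first_index, last_index, last_index - first_index + 1);
	return 0;
}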
6528
d397712b 6529static noinline int relocate_data_extent(struct inode *reloc_inode,
1a40e23b
ZY
6530 struct btrfs_key *extent_key,
6531 u64 offset)
6532{
6533 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6534 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6535 struct extent_map *em;
6643558d
YZ
6536 u64 start = extent_key->objectid - offset;
6537 u64 end = start + extent_key->offset - 1;
bf4ef679 6538
1a40e23b
ZY
6539 em = alloc_extent_map(GFP_NOFS);
6540 BUG_ON(!em || IS_ERR(em));
bf4ef679 6541
6643558d 6542 em->start = start;
1a40e23b 6543 em->len = extent_key->offset;
c8b97818 6544 em->block_len = extent_key->offset;
1a40e23b
ZY
6545 em->block_start = extent_key->objectid;
6546 em->bdev = root->fs_info->fs_devices->latest_bdev;
6547 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6548
6549 /* setup extent map to cheat btrfs_readpage */
6643558d 6550 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
1a40e23b
ZY
6551 while (1) {
6552 int ret;
890871be 6553 write_lock(&em_tree->lock);
1a40e23b 6554 ret = add_extent_mapping(em_tree, em);
890871be 6555 write_unlock(&em_tree->lock);
1a40e23b
ZY
6556 if (ret != -EEXIST) {
6557 free_extent_map(em);
bf4ef679
CM
6558 break;
6559 }
6643558d 6560 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
bf4ef679 6561 }
6643558d 6562 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
bf4ef679 6563
6643558d 6564 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
1a40e23b 6565}
edbd8d4e 6566
1a40e23b
ZY
6567struct btrfs_ref_path {
6568 u64 extent_start;
6569 u64 nodes[BTRFS_MAX_LEVEL];
6570 u64 root_objectid;
6571 u64 root_generation;
6572 u64 owner_objectid;
1a40e23b
ZY
6573 u32 num_refs;
6574 int lowest_level;
6575 int current_level;
f82d02d9
YZ
6576 int shared_level;
6577
6578 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6579 u64 new_nodes[BTRFS_MAX_LEVEL];
1a40e23b 6580};
7d9eb12c 6581
1a40e23b 6582struct disk_extent {
c8b97818 6583 u64 ram_bytes;
1a40e23b
ZY
6584 u64 disk_bytenr;
6585 u64 disk_num_bytes;
6586 u64 offset;
6587 u64 num_bytes;
c8b97818
CM
6588 u8 compression;
6589 u8 encryption;
6590 u16 other_encoding;
1a40e23b 6591};
4313b399 6592
1a40e23b
ZY
6593static int is_cowonly_root(u64 root_objectid)
6594{
6595 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6596 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6597 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6598 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
0403e47e
YZ
6599 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6600 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
1a40e23b
ZY
6601 return 1;
6602 return 0;
6603}
edbd8d4e 6604
d397712b 6605static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6606 struct btrfs_root *extent_root,
6607 struct btrfs_ref_path *ref_path,
6608 int first_time)
6609{
6610 struct extent_buffer *leaf;
6611 struct btrfs_path *path;
6612 struct btrfs_extent_ref *ref;
6613 struct btrfs_key key;
6614 struct btrfs_key found_key;
6615 u64 bytenr;
6616 u32 nritems;
6617 int level;
6618 int ret = 1;
edbd8d4e 6619
1a40e23b
ZY
6620 path = btrfs_alloc_path();
6621 if (!path)
6622 return -ENOMEM;
bf4ef679 6623
1a40e23b
ZY
6624 if (first_time) {
6625 ref_path->lowest_level = -1;
6626 ref_path->current_level = -1;
f82d02d9 6627 ref_path->shared_level = -1;
1a40e23b
ZY
6628 goto walk_up;
6629 }
6630walk_down:
6631 level = ref_path->current_level - 1;
6632 while (level >= -1) {
6633 u64 parent;
6634 if (level < ref_path->lowest_level)
6635 break;
bf4ef679 6636
d397712b 6637 if (level >= 0)
1a40e23b 6638 bytenr = ref_path->nodes[level];
d397712b 6639 else
1a40e23b 6640 bytenr = ref_path->extent_start;
1a40e23b 6641 BUG_ON(bytenr == 0);
bf4ef679 6642
1a40e23b
ZY
6643 parent = ref_path->nodes[level + 1];
6644 ref_path->nodes[level + 1] = 0;
6645 ref_path->current_level = level;
6646 BUG_ON(parent == 0);
0ef3e66b 6647
1a40e23b
ZY
6648 key.objectid = bytenr;
6649 key.offset = parent + 1;
6650 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 6651
1a40e23b
ZY
6652 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6653 if (ret < 0)
edbd8d4e 6654 goto out;
1a40e23b 6655 BUG_ON(ret == 0);
7d9eb12c 6656
1a40e23b
ZY
6657 leaf = path->nodes[0];
6658 nritems = btrfs_header_nritems(leaf);
6659 if (path->slots[0] >= nritems) {
6660 ret = btrfs_next_leaf(extent_root, path);
6661 if (ret < 0)
6662 goto out;
6663 if (ret > 0)
6664 goto next;
6665 leaf = path->nodes[0];
6666 }
0ef3e66b 6667
1a40e23b
ZY
6668 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6669 if (found_key.objectid == bytenr &&
f82d02d9
YZ
6670 found_key.type == BTRFS_EXTENT_REF_KEY) {
6671 if (level < ref_path->shared_level)
6672 ref_path->shared_level = level;
1a40e23b 6673 goto found;
f82d02d9 6674 }
1a40e23b
ZY
6675next:
6676 level--;
6677 btrfs_release_path(extent_root, path);
d899e052 6678 cond_resched();
1a40e23b
ZY
6679 }
6680 /* reached lowest level */
6681 ret = 1;
6682 goto out;
6683walk_up:
6684 level = ref_path->current_level;
6685 while (level < BTRFS_MAX_LEVEL - 1) {
6686 u64 ref_objectid;
d397712b
CM
6687
6688 if (level >= 0)
1a40e23b 6689 bytenr = ref_path->nodes[level];
d397712b 6690 else
1a40e23b 6691 bytenr = ref_path->extent_start;
d397712b 6692
1a40e23b 6693 BUG_ON(bytenr == 0);
edbd8d4e 6694
1a40e23b
ZY
6695 key.objectid = bytenr;
6696 key.offset = 0;
6697 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 6698
1a40e23b
ZY
6699 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6700 if (ret < 0)
6701 goto out;
edbd8d4e 6702
1a40e23b
ZY
6703 leaf = path->nodes[0];
6704 nritems = btrfs_header_nritems(leaf);
6705 if (path->slots[0] >= nritems) {
6706 ret = btrfs_next_leaf(extent_root, path);
6707 if (ret < 0)
6708 goto out;
6709 if (ret > 0) {
6710 /* the extent was freed by someone */
6711 if (ref_path->lowest_level == level)
6712 goto out;
6713 btrfs_release_path(extent_root, path);
6714 goto walk_down;
6715 }
6716 leaf = path->nodes[0];
6717 }
edbd8d4e 6718
1a40e23b
ZY
6719 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6720 if (found_key.objectid != bytenr ||
6721 found_key.type != BTRFS_EXTENT_REF_KEY) {
6722 /* the extent was freed by someone */
6723 if (ref_path->lowest_level == level) {
6724 ret = 1;
6725 goto out;
6726 }
6727 btrfs_release_path(extent_root, path);
6728 goto walk_down;
6729 }
6730found:
6731 ref = btrfs_item_ptr(leaf, path->slots[0],
6732 struct btrfs_extent_ref);
6733 ref_objectid = btrfs_ref_objectid(leaf, ref);
6734 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6735 if (first_time) {
6736 level = (int)ref_objectid;
6737 BUG_ON(level >= BTRFS_MAX_LEVEL);
6738 ref_path->lowest_level = level;
6739 ref_path->current_level = level;
6740 ref_path->nodes[level] = bytenr;
6741 } else {
6742 WARN_ON(ref_objectid != level);
6743 }
6744 } else {
6745 WARN_ON(level != -1);
6746 }
6747 first_time = 0;
bf4ef679 6748
1a40e23b
ZY
6749 if (ref_path->lowest_level == level) {
6750 ref_path->owner_objectid = ref_objectid;
1a40e23b
ZY
6751 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6752 }
bf4ef679 6753
7d9eb12c 6754 /*
1a40e23b
ZY
6755 * the block is the tree root or the block isn't in a reference
6756 * counted tree.
7d9eb12c 6757 */
1a40e23b
ZY
6758 if (found_key.objectid == found_key.offset ||
6759 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6760 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6761 ref_path->root_generation =
6762 btrfs_ref_generation(leaf, ref);
6763 if (level < 0) {
6764 /* special reference from the tree log */
6765 ref_path->nodes[0] = found_key.offset;
6766 ref_path->current_level = 0;
6767 }
6768 ret = 0;
6769 goto out;
6770 }
7d9eb12c 6771
1a40e23b
ZY
6772 level++;
6773 BUG_ON(ref_path->nodes[level] != 0);
6774 ref_path->nodes[level] = found_key.offset;
6775 ref_path->current_level = level;
bf4ef679 6776
1a40e23b
ZY
6777 /*
6778 * the reference was created in the running transaction,
6779 * no need to continue walking up.
6780 */
6781 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6782 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6783 ref_path->root_generation =
6784 btrfs_ref_generation(leaf, ref);
6785 ret = 0;
6786 goto out;
7d9eb12c
CM
6787 }
6788
1a40e23b 6789 btrfs_release_path(extent_root, path);
d899e052 6790 cond_resched();
7d9eb12c 6791 }
1a40e23b
ZY
6792 /* reached max tree level, but no tree root found. */
6793 BUG();
edbd8d4e 6794out:
1a40e23b
ZY
6795 btrfs_free_path(path);
6796 return ret;
edbd8d4e
CM
6797}
6798
1a40e23b
ZY
6799static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6800 struct btrfs_root *extent_root,
6801 struct btrfs_ref_path *ref_path,
6802 u64 extent_start)
a061fc8d 6803{
1a40e23b
ZY
6804 memset(ref_path, 0, sizeof(*ref_path));
6805 ref_path->extent_start = extent_start;
a061fc8d 6806
1a40e23b 6807 return __next_ref_path(trans, extent_root, ref_path, 1);
a061fc8d
CM
6808}
6809
1a40e23b
ZY
6810static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6811 struct btrfs_root *extent_root,
6812 struct btrfs_ref_path *ref_path)
edbd8d4e 6813{
1a40e23b
ZY
6814 return __next_ref_path(trans, extent_root, ref_path, 0);
6815}
6816
d397712b 6817static noinline int get_new_locations(struct inode *reloc_inode,
1a40e23b
ZY
6818 struct btrfs_key *extent_key,
6819 u64 offset, int no_fragment,
6820 struct disk_extent **extents,
6821 int *nr_extents)
6822{
6823 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6824 struct btrfs_path *path;
6825 struct btrfs_file_extent_item *fi;
edbd8d4e 6826 struct extent_buffer *leaf;
1a40e23b
ZY
6827 struct disk_extent *exts = *extents;
6828 struct btrfs_key found_key;
6829 u64 cur_pos;
6830 u64 last_byte;
edbd8d4e 6831 u32 nritems;
1a40e23b
ZY
6832 int nr = 0;
6833 int max = *nr_extents;
6834 int ret;
edbd8d4e 6835
1a40e23b
ZY
6836 WARN_ON(!no_fragment && *extents);
6837 if (!exts) {
6838 max = 1;
6839 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6840 if (!exts)
6841 return -ENOMEM;
a061fc8d 6842 }
edbd8d4e 6843
1a40e23b
ZY
6844 path = btrfs_alloc_path();
6845 BUG_ON(!path);
edbd8d4e 6846
1a40e23b
ZY
6847 cur_pos = extent_key->objectid - offset;
6848 last_byte = extent_key->objectid + extent_key->offset;
6849 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6850 cur_pos, 0);
6851 if (ret < 0)
6852 goto out;
6853 if (ret > 0) {
6854 ret = -ENOENT;
6855 goto out;
6856 }
edbd8d4e 6857
1a40e23b 6858 while (1) {
edbd8d4e
CM
6859 leaf = path->nodes[0];
6860 nritems = btrfs_header_nritems(leaf);
1a40e23b
ZY
6861 if (path->slots[0] >= nritems) {
6862 ret = btrfs_next_leaf(root, path);
a061fc8d
CM
6863 if (ret < 0)
6864 goto out;
1a40e23b
ZY
6865 if (ret > 0)
6866 break;
bf4ef679 6867 leaf = path->nodes[0];
a061fc8d 6868 }
edbd8d4e
CM
6869
6870 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1a40e23b
ZY
6871 if (found_key.offset != cur_pos ||
6872 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6873 found_key.objectid != reloc_inode->i_ino)
edbd8d4e
CM
6874 break;
6875
1a40e23b
ZY
6876 fi = btrfs_item_ptr(leaf, path->slots[0],
6877 struct btrfs_file_extent_item);
6878 if (btrfs_file_extent_type(leaf, fi) !=
6879 BTRFS_FILE_EXTENT_REG ||
6880 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
edbd8d4e 6881 break;
1a40e23b
ZY
6882
6883 if (nr == max) {
6884 struct disk_extent *old = exts;
6885 max *= 2;
6886 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6887 memcpy(exts, old, sizeof(*exts) * nr);
6888 if (old != *extents)
6889 kfree(old);
a061fc8d 6890 }
edbd8d4e 6891
1a40e23b
ZY
6892 exts[nr].disk_bytenr =
6893 btrfs_file_extent_disk_bytenr(leaf, fi);
6894 exts[nr].disk_num_bytes =
6895 btrfs_file_extent_disk_num_bytes(leaf, fi);
6896 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6897 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
c8b97818
CM
6898 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6899 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6900 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6901 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6902 fi);
d899e052
YZ
6903 BUG_ON(exts[nr].offset > 0);
6904 BUG_ON(exts[nr].compression || exts[nr].encryption);
6905 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
edbd8d4e 6906
1a40e23b
ZY
6907 cur_pos += exts[nr].num_bytes;
6908 nr++;
6909
6910 if (cur_pos + offset >= last_byte)
6911 break;
6912
6913 if (no_fragment) {
6914 ret = 1;
edbd8d4e 6915 goto out;
1a40e23b
ZY
6916 }
6917 path->slots[0]++;
6918 }
6919
1f80e4db 6920 BUG_ON(cur_pos + offset > last_byte);
1a40e23b
ZY
6921 if (cur_pos + offset < last_byte) {
6922 ret = -ENOENT;
6923 goto out;
edbd8d4e
CM
6924 }
6925 ret = 0;
6926out:
1a40e23b
ZY
6927 btrfs_free_path(path);
6928 if (ret) {
6929 if (exts != *extents)
6930 kfree(exts);
6931 } else {
6932 *extents = exts;
6933 *nr_extents = nr;
6934 }
6935 return ret;
6936}
6937
static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *extent_key,
				       struct btrfs_key *leaf_key,
				       struct btrfs_ref_path *ref_path,
				       struct disk_extent *new_extents,
				       int nr_extents)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	struct btrfs_key key;
	u64 lock_start = 0;
	u64 lock_end = 0;
	u64 num_bytes;
	u64 ext_offset;
	u64 search_end = (u64)-1;
	u32 nritems;
	int nr_scaned = 0;
	int extent_locked = 0;
	int extent_type;
	int ret;

	memcpy(&key, leaf_key, sizeof(key));
	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
		if (key.objectid < ref_path->owner_objectid ||
		    (key.objectid == ref_path->owner_objectid &&
		     key.type < BTRFS_EXTENT_DATA_KEY)) {
			key.objectid = ref_path->owner_objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = 0;
		}
	}

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
next:
		if (extent_locked && ret > 0) {
			/*
			 * the file extent item was modified by someone
			 * before the extent got locked.
			 */
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}

		if (path->slots[0] >= nritems) {
			if (++nr_scaned > 2)
				break;

			BUG_ON(extent_locked);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
			if ((key.objectid > ref_path->owner_objectid) ||
			    (key.objectid == ref_path->owner_objectid &&
			     key.type > BTRFS_EXTENT_DATA_KEY) ||
			    key.offset >= search_end)
				break;
		}

		if (inode && key.objectid != inode->i_ino) {
			BUG_ON(extent_locked);
			btrfs_release_path(root, path);
			mutex_unlock(&inode->i_mutex);
			iput(inode);
			inode = NULL;
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		     extent_key->objectid)) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}

		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		ext_offset = btrfs_file_extent_offset(leaf, fi);

		if (search_end == (u64)-1) {
			search_end = key.offset - ext_offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		}

		if (!extent_locked) {
			lock_start = key.offset;
			lock_end = lock_start + num_bytes - 1;
		} else {
			if (lock_start > key.offset ||
			    lock_end + 1 < key.offset + num_bytes) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				extent_locked = 0;
			}
		}

		if (!inode) {
			btrfs_release_path(root, path);

			inode = btrfs_iget_locked(root->fs_info->sb,
						  key.objectid, root);
			if (inode->i_state & I_NEW) {
				BTRFS_I(inode)->root = root;
				BTRFS_I(inode)->location.objectid =
					key.objectid;
				BTRFS_I(inode)->location.type =
					BTRFS_INODE_ITEM_KEY;
				BTRFS_I(inode)->location.offset = 0;
				btrfs_read_locked_inode(inode);
				unlock_new_inode(inode);
			}
			/*
			 * some code calls btrfs_commit_transaction while
			 * holding the i_mutex, so we can't use mutex_lock
			 * here.
			 */
			if (is_bad_inode(inode) ||
			    !mutex_trylock(&inode->i_mutex)) {
				iput(inode);
				inode = NULL;
				key.offset = (u64)-1;
				goto skip;
			}
		}

		if (!extent_locked) {
			struct btrfs_ordered_extent *ordered;

			btrfs_release_path(root, path);

			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				    lock_end, GFP_NOFS);
			ordered = btrfs_lookup_first_ordered_extent(inode,
								    lock_end);
			if (ordered &&
			    ordered->file_offset <= lock_end &&
			    ordered->file_offset + ordered->len > lock_start) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				btrfs_start_ordered_extent(inode, ordered, 1);
				btrfs_put_ordered_extent(ordered);
				key.offset += num_bytes;
				goto skip;
			}
			if (ordered)
				btrfs_put_ordered_extent(ordered);

			extent_locked = 1;
			continue;
		}

		if (nr_extents == 1) {
			/* update extent pointer in place */
			btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[0].disk_bytenr);
			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[0].disk_num_bytes);
			btrfs_mark_buffer_dirty(leaf);

			btrfs_drop_extent_cache(inode, key.offset,
						key.offset + num_bytes - 1, 0);

			ret = btrfs_inc_extent_ref(trans, root,
						new_extents[0].disk_bytenr,
						new_extents[0].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid,
						key.objectid);
			BUG_ON(ret);

			ret = btrfs_free_extent(trans, root,
						extent_key->objectid,
						extent_key->offset,
						leaf->start,
						btrfs_header_owner(leaf),
						btrfs_header_generation(leaf),
						key.objectid, 0);
			BUG_ON(ret);

			btrfs_release_path(root, path);
			key.offset += num_bytes;
		} else {
			BUG_ON(1);
#if 0
			u64 alloc_hint;
			u64 extent_len;
			int i;
			/*
			 * drop old extent pointer at first, then insert the
			 * new pointers one by one
			 */
			btrfs_release_path(root, path);
			ret = btrfs_drop_extents(trans, root, inode, key.offset,
						 key.offset + num_bytes,
						 key.offset, &alloc_hint);
			BUG_ON(ret);

			for (i = 0; i < nr_extents; i++) {
				if (ext_offset >= new_extents[i].num_bytes) {
					ext_offset -= new_extents[i].num_bytes;
					continue;
				}
				extent_len = min(new_extents[i].num_bytes -
						 ext_offset, num_bytes);

				ret = btrfs_insert_empty_item(trans, root,
							      path, &key,
							      sizeof(*fi));
				BUG_ON(ret);

				leaf = path->nodes[0];
				fi = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
				btrfs_set_file_extent_generation(leaf, fi,
							trans->transid);
				btrfs_set_file_extent_type(leaf, fi,
							BTRFS_FILE_EXTENT_REG);
				btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[i].disk_bytenr);
				btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[i].disk_num_bytes);
				btrfs_set_file_extent_ram_bytes(leaf, fi,
						new_extents[i].ram_bytes);

				btrfs_set_file_extent_compression(leaf, fi,
						new_extents[i].compression);
				btrfs_set_file_extent_encryption(leaf, fi,
						new_extents[i].encryption);
				btrfs_set_file_extent_other_encoding(leaf, fi,
						new_extents[i].other_encoding);

				btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len);
				ext_offset += new_extents[i].offset;
				btrfs_set_file_extent_offset(leaf, fi,
							ext_offset);
				btrfs_mark_buffer_dirty(leaf);

				btrfs_drop_extent_cache(inode, key.offset,
						key.offset + extent_len - 1, 0);

				ret = btrfs_inc_extent_ref(trans, root,
						new_extents[i].disk_bytenr,
						new_extents[i].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid, key.objectid);
				BUG_ON(ret);
				btrfs_release_path(root, path);

				inode_add_bytes(inode, extent_len);

				ext_offset = 0;
				num_bytes -= extent_len;
				key.offset += extent_len;

				if (num_bytes == 0)
					break;
			}
			BUG_ON(i >= nr_extents);
#endif
		}

		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}
skip:
		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
		    key.offset >= search_end)
			break;

		cond_resched();
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	if (inode) {
		mutex_unlock(&inode->i_mutex);
		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
		}
		iput(inode);
	}
	return ret;
}

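/*
 * when a tree block is COWed into a reloc tree, duplicate the leaf ref
 * cache entry of the original block for the new copy, so the copy
 * starts out with the same cached extent back-references.
 */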
int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf, u64 orig_start)
{
	int level;
	int ret;

	BUG_ON(btrfs_header_generation(buf) != trans->transid);
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	level = btrfs_header_level(buf);
	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_leaf_ref *orig_ref;

		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
		if (!orig_ref)
			return -ENOENT;

		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
		if (!ref) {
			btrfs_free_leaf_ref(root, orig_ref);
			return -ENOMEM;
		}

		ref->nritems = orig_ref->nritems;
		memcpy(ref->extents, orig_ref->extents,
		       sizeof(ref->extents[0]) * ref->nritems);

		btrfs_free_leaf_ref(root, orig_ref);

		ref->root_gen = trans->transid;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);

		ret = btrfs_add_leaf_ref(root, ref, 0);
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
	return 0;
}

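/*
 * drop the cached extent mappings for every regular file extent in this
 * leaf that has an inode in the target root, locking each range in the
 * inode's io_tree while the cache is dropped.
 */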
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    struct btrfs_block_group_cache *group,
					    struct btrfs_root *target_root)
{
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;
	struct extent_state *cached_state = NULL;
	u64 num_bytes;
	u64 skip_objectid = 0;
	u32 nritems;
	u32 i;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			continue;
		if (!inode || inode->i_ino != key.objectid) {
			iput(inode);
			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);
		}
		if (!inode) {
			skip_objectid = key.objectid;
			continue;
		}
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
				 key.offset + num_bytes - 1, 0, &cached_state,
				 GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
				     key.offset + num_bytes - 1, &cached_state,
				     GFP_NOFS);
		cond_resched();
	}
	iput(inode);
	return 0;
}

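/*
 * for each file extent in this leaf that falls inside the block group
 * being relocated, look up the extent's new location via
 * get_new_locations() and rewrite both the item and its leaf ref cache
 * entry to point at the new disk bytenr.
 */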
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    struct btrfs_block_group_cache *group,
					    struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;
	u64 bytenr;
	u64 num_bytes;
	u32 nritems;
	u32 i;
	int ext_index;
	int nr_extent;
	int ret;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	BUG_ON(!ref);

	ext_index = -1;
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;

		ext_index++;
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)
			continue;

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		nr_extent = 1;
		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);
		if (ret > 0)
			continue;
		BUG_ON(ret < 0);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						  new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						     new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					   new_extent->disk_bytenr,
					   new_extent->disk_num_bytes,
					   leaf->start,
					   root->root_key.objectid,
					   trans->transid, key.objectid);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
					key.objectid, 0);
		BUG_ON(ret);
		cond_resched();
	}
	kfree(new_extent);
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
	return 0;
}

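/*
 * detach the subvol's reloc tree and queue it on the dead list; the
 * root item is updated on disk so a later pass can finish dropping the
 * tree.
 */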
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&reloc_root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
		BUG_ON(ret);
	}
	return 0;
}

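/*
 * drop every queued dead reloc root, restarting the transaction each
 * time btrfs_drop_snapshot() returns -EAGAIN and balancing dirty btree
 * pages in between so one large tree doesn't pin a huge transaction.
 */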
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;
	int ret;
	unsigned long nr;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);
		while (1) {
			trans = btrfs_join_transaction(root, 1);
			BUG_ON(!trans);

			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, reloc_root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, root);
			BUG_ON(ret);
			btrfs_btree_balance_dirty(root, nr);
		}

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
		btrfs_btree_balance_dirty(root, nr);

		kfree(prev_root);
		prev_root = reloc_root;
	}
	if (prev_root) {
		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
		kfree(prev_root);
	}
	return 0;
}

int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
	return 0;
}

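/*
 * finish reloc-tree work left over from a previous balance: if any dead
 * reloc roots are found, commit a transaction to drop them, then run
 * orphan cleanup on the data relocation tree.
 */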
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;
	int found;
	int ret;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	BUG_ON(ret);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	if (found) {
		trans = btrfs_start_transaction(root, 1);
		BUG_ON(!trans);
		ret = btrfs_commit_transaction(trans, root);
		BUG_ON(ret);
	}

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
	return 0;
}

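/*
 * lazily create the reloc tree for a subvol: a zero-refcount copy of
 * root->commit_root inserted under BTRFS_TREE_RELOC_OBJECTID, with the
 * subvol's own objectid stored in the key offset to tell the reloc
 * trees apart.
 */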
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)
		return 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(ret);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
	return 0;
}

/*
 * Core function of space balance.
 *
 * The idea is to use reloc trees to relocate tree blocks in reference
 * counted roots.  There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid.  Reloc trees are
 * snapshots of the latest committed roots of subvols
 * (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps.
 * COW the block through the subvol's reloc tree, then update the block
 * pointer in the subvol to point to the new block.  Since all reloc
 * trees share the same root key objectid, special handling for tree
 * blocks owned by them is easy.  Once a tree block has been COWed in
 * one reloc tree, we can use the resulting new block directly when the
 * same block is required to COW again through other reloc trees.  In
 * this way, relocated tree blocks are shared between reloc trees, so
 * they are also shared between subvols.
 */
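/*
 * A concrete example of the sharing described above: if subvols A and B
 * both reference tree block X, relocating X through A's reloc tree
 * produces a new copy X'.  When the walk later reaches X through B's
 * reloc tree, the shared root key objectid lets it reuse X' instead of
 * COWing again, so A and B still share one block at the new location.
 */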
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	u64 *nodes;
	int level;
	int shared_level;
	int lowest_level = 0;
	int ret;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		BUG_ON(ret < 0);
		path->lowest_level = 0;
		btrfs_release_path(root, path);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	BUG_ON(ret);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
				break;
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		}
		if (nodes[0] &&
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
			BUG_ON(ret);
		}
		btrfs_release_path(reloc_root, path);
	} else {
		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
				       lowest_level);
		BUG_ON(ret);
	}

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
	BUG_ON(ret < 0);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 0);
		BUG_ON(ret);
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		BUG_ON(ret);
		free_extent_buffer(eb);
	}

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
	return 0;
}

static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	int ret;

	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
	BUG_ON(ret);

	return 0;
}

static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;

	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_release_path(extent_root, path);
	return ret;
}

static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}

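/*
 * relocate all references to a single extent: walk every reference
 * path to it and either copy the data to its new location (pass 0),
 * update pointers while keeping metadata shared between snapshots
 * (pass 1), or fall back to rewriting the file extents one by one via
 * get_new_locations().
 */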
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	int nr_extents = 0;
	int loops;
	int ret;
	int level;
	struct btrfs_key first_key;
	u64 prev_block = 0;

	trans = btrfs_start_transaction(extent_root, 1);
	BUG_ON(!trans);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
		goto out;
	}

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
		ret = -ENOMEM;
		goto out;
	}

	for (loops = 0; ; loops++) {
		if (loops == 0) {
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		} else {
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
		}
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
			continue;

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)
			continue;

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			if (pass == 0) {
				/*
				 * copy data extents to new locations
				 */
				u64 group_start = group->key.objectid;
				ret = relocate_data_extent(reloc_inode,
							   extent_key,
							   group_start);
				if (ret < 0)
					goto out;
				break;
			}
			level = 0;
		} else {
			level = ref_path->owner_objectid;
		}

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
					     block_size, 0);
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;
		}

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			if (pass == 1) {
				ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
				if (ret < 0)
					goto out;
				continue;
			}
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			if (!new_extents) {
				u64 group_start = group->key.objectid;
				new_extents = kmalloc(sizeof(*new_extents),
						      GFP_NOFS);
				if (!new_extents) {
					ret = -ENOMEM;
					goto out;
				}
				nr_extents = 1;
				ret = get_new_locations(reloc_inode,
							extent_key,
							group_start, 1,
							&new_extents,
							&nr_extents);
				if (ret)
					goto out;
			}
			ret = replace_one_extent(trans, found_root,
						 path, extent_key,
						 &first_key, ref_path,
						 new_extents, nr_extents);
		} else {
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, extent_root);
	kfree(new_extents);
	kfree(ref_path);
	return ret;
}
#endif

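/*
 * map the requested block group flags onto what the current device
 * count can support.  Roughly: with one device, RAID0 degrades to
 * single and RAID1/RAID10 to DUP; with several devices, DUP upgrades
 * to RAID1 and single to RAID0, while existing RAID flags are kept.
 */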
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}

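/*
 * try to mark a block group read-only: this only succeeds if the rest
 * of the space info can absorb the group's unused bytes without
 * overcommitting sinfo->total_bytes.
 */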
static int set_block_group_ro(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	if (cache->ro)
		return 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly +
	    cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		sinfo->bytes_reserved += cache->reserved_pinned;
		cache->reserved_pinned = 0;
		cache->ro = 1;
		ret = 0;
	}

	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);

	ret = set_block_group_ro(cache);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

int btrfs_set_block_group_rw(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return 0;
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 * ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	/* no bytes used, we're good */
	if (!btrfs_block_group_used(&block_group->item))
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     btrfs_block_group_used(&block_group->item) <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  However, if we
	 * were marked as full, then we know there aren't enough chunks, and we
	 * can just return.
	 */
	ret = -1;
	if (full)
		goto out;

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 min_free = btrfs_block_group_used(&block_group->item);
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				break;
			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}

static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}

static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}

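/*
 * load every block group item from the extent tree at mount time, wire
 * each group into its space info, and mark un-mirrored groups read-only
 * when mirrored profiles exist so allocations prefer the mirrored ones.
 */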
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
	if (cache_gen != 0 &&
	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;
	if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
		printk(KERN_INFO "btrfs: disk space caching is enabled\n");

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		/*
		 * we only want to have 32k of ram per block group for keeping
		 * track of free space, and if we pass 1/2 of that we want to
		 * start converting things over to using bitmaps
		 */
		cache->extents_thresh = ((1024 * 32) / 2) /
			sizeof(struct btrfs_free_space);

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
		sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
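
/*
 * remove an empty, read-only block group: drop its free space cache
 * inode, pull the group out of the allocation clusters and the block
 * group cache tree, adjust the space info counters, and finally delete
 * the block group item itself.
 */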
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = lookup_free_space_inode(root, block_group, path);
	if (!IS_ERR(inode)) {
		btrfs_orphan_add(trans, inode);
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(tree_root, path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(tree_root, path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}