Btrfs: try not to sleep as much when doing slow caching
[linux-2.6-block.git] / fs / btrfs / extent-tree.c
CommitLineData
6cbd5570
CM
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
ec6b910f 18#include <linux/sched.h>
edbd8d4e 19#include <linux/pagemap.h>
ec44a35c 20#include <linux/writeback.h>
21af804c 21#include <linux/blkdev.h>
b7a9f29f 22#include <linux/sort.h>
4184ea7f 23#include <linux/rcupdate.h>
817d52f8 24#include <linux/kthread.h>
5a0e3ad6 25#include <linux/slab.h>
4b4e25f2 26#include "compat.h"
74493f7a 27#include "hash.h"
fec577fb
CM
28#include "ctree.h"
29#include "disk-io.h"
30#include "print-tree.h"
e089f05c 31#include "transaction.h"
0b86a832 32#include "volumes.h"
925baedd 33#include "locking.h"
fa9c0d79 34#include "free-space-cache.h"
fec577fb 35
0e4f8f88
CM
36/* control flags for do_chunk_alloc's force field
37 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
38 * if we really need one.
39 *
40 * CHUNK_ALLOC_FORCE means it must try to allocate one
41 *
42 * CHUNK_ALLOC_LIMITED means to only try and allocate one
43 * if we have very few chunks already allocated. This is
44 * used as part of the clustering code to help make sure
45 * we have a good pool of storage to cluster in, without
46 * filling the FS with empty chunks
47 *
48 */
49enum {
50 CHUNK_ALLOC_NO_FORCE = 0,
51 CHUNK_ALLOC_FORCE = 1,
52 CHUNK_ALLOC_LIMITED = 2,
53};
54
f3465ca4
JB
55static int update_block_group(struct btrfs_trans_handle *trans,
56 struct btrfs_root *root,
f0486c68 57 u64 bytenr, u64 num_bytes, int alloc);
5d4f98a2
YZ
58static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
59 struct btrfs_root *root,
60 u64 bytenr, u64 num_bytes, u64 parent,
61 u64 root_objectid, u64 owner_objectid,
62 u64 owner_offset, int refs_to_drop,
63 struct btrfs_delayed_extent_op *extra_op);
64static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
65 struct extent_buffer *leaf,
66 struct btrfs_extent_item *ei);
67static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
68 struct btrfs_root *root,
69 u64 parent, u64 root_objectid,
70 u64 flags, u64 owner, u64 offset,
71 struct btrfs_key *ins, int ref_mod);
72static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
73 struct btrfs_root *root,
74 u64 parent, u64 root_objectid,
75 u64 flags, struct btrfs_disk_key *key,
76 int level, struct btrfs_key *ins);
6a63209f
JB
77static int do_chunk_alloc(struct btrfs_trans_handle *trans,
78 struct btrfs_root *extent_root, u64 alloc_bytes,
79 u64 flags, int force);
11833d66
YZ
80static int find_next_key(struct btrfs_path *path, int level,
81 struct btrfs_key *key);
9ed74f2d
JB
82static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
83 int dump_block_groups);
6a63209f 84
817d52f8
JB
85static noinline int
86block_group_cache_done(struct btrfs_block_group_cache *cache)
87{
88 smp_mb();
89 return cache->cached == BTRFS_CACHE_FINISHED;
90}
91
0f9dd46c
JB
92static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
93{
94 return (cache->flags & bits) == bits;
95}
96
11dfe35a
JB
97void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
98{
99 atomic_inc(&cache->count);
100}
101
102void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
103{
f0486c68
YZ
104 if (atomic_dec_and_test(&cache->count)) {
105 WARN_ON(cache->pinned > 0);
106 WARN_ON(cache->reserved > 0);
107 WARN_ON(cache->reserved_pinned > 0);
11dfe35a 108 kfree(cache);
f0486c68 109 }
11dfe35a
JB
110}
111
0f9dd46c
JB
112/*
113 * this adds the block group to the fs_info rb tree for the block group
114 * cache
115 */
b2950863 116static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
0f9dd46c
JB
117 struct btrfs_block_group_cache *block_group)
118{
119 struct rb_node **p;
120 struct rb_node *parent = NULL;
121 struct btrfs_block_group_cache *cache;
122
123 spin_lock(&info->block_group_cache_lock);
124 p = &info->block_group_cache_tree.rb_node;
125
126 while (*p) {
127 parent = *p;
128 cache = rb_entry(parent, struct btrfs_block_group_cache,
129 cache_node);
130 if (block_group->key.objectid < cache->key.objectid) {
131 p = &(*p)->rb_left;
132 } else if (block_group->key.objectid > cache->key.objectid) {
133 p = &(*p)->rb_right;
134 } else {
135 spin_unlock(&info->block_group_cache_lock);
136 return -EEXIST;
137 }
138 }
139
140 rb_link_node(&block_group->cache_node, parent, p);
141 rb_insert_color(&block_group->cache_node,
142 &info->block_group_cache_tree);
143 spin_unlock(&info->block_group_cache_lock);
144
145 return 0;
146}
147
148/*
149 * This will return the block group at or after bytenr if contains is 0, else
150 * it will return the block group that contains the bytenr
151 */
152static struct btrfs_block_group_cache *
153block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
154 int contains)
155{
156 struct btrfs_block_group_cache *cache, *ret = NULL;
157 struct rb_node *n;
158 u64 end, start;
159
160 spin_lock(&info->block_group_cache_lock);
161 n = info->block_group_cache_tree.rb_node;
162
163 while (n) {
164 cache = rb_entry(n, struct btrfs_block_group_cache,
165 cache_node);
166 end = cache->key.objectid + cache->key.offset - 1;
167 start = cache->key.objectid;
168
169 if (bytenr < start) {
170 if (!contains && (!ret || start < ret->key.objectid))
171 ret = cache;
172 n = n->rb_left;
173 } else if (bytenr > start) {
174 if (contains && bytenr <= end) {
175 ret = cache;
176 break;
177 }
178 n = n->rb_right;
179 } else {
180 ret = cache;
181 break;
182 }
183 }
d2fb3437 184 if (ret)
11dfe35a 185 btrfs_get_block_group(ret);
0f9dd46c
JB
186 spin_unlock(&info->block_group_cache_lock);
187
188 return ret;
189}
190
11833d66
YZ
191static int add_excluded_extent(struct btrfs_root *root,
192 u64 start, u64 num_bytes)
817d52f8 193{
11833d66
YZ
194 u64 end = start + num_bytes - 1;
195 set_extent_bits(&root->fs_info->freed_extents[0],
196 start, end, EXTENT_UPTODATE, GFP_NOFS);
197 set_extent_bits(&root->fs_info->freed_extents[1],
198 start, end, EXTENT_UPTODATE, GFP_NOFS);
199 return 0;
200}
817d52f8 201
11833d66
YZ
202static void free_excluded_extents(struct btrfs_root *root,
203 struct btrfs_block_group_cache *cache)
204{
205 u64 start, end;
817d52f8 206
11833d66
YZ
207 start = cache->key.objectid;
208 end = start + cache->key.offset - 1;
209
210 clear_extent_bits(&root->fs_info->freed_extents[0],
211 start, end, EXTENT_UPTODATE, GFP_NOFS);
212 clear_extent_bits(&root->fs_info->freed_extents[1],
213 start, end, EXTENT_UPTODATE, GFP_NOFS);
817d52f8
JB
214}
215
11833d66
YZ
216static int exclude_super_stripes(struct btrfs_root *root,
217 struct btrfs_block_group_cache *cache)
817d52f8 218{
817d52f8
JB
219 u64 bytenr;
220 u64 *logical;
221 int stripe_len;
222 int i, nr, ret;
223
06b2331f
YZ
224 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
225 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
226 cache->bytes_super += stripe_len;
227 ret = add_excluded_extent(root, cache->key.objectid,
228 stripe_len);
229 BUG_ON(ret);
230 }
231
817d52f8
JB
232 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
233 bytenr = btrfs_sb_offset(i);
234 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
235 cache->key.objectid, bytenr,
236 0, &logical, &nr, &stripe_len);
237 BUG_ON(ret);
11833d66 238
817d52f8 239 while (nr--) {
1b2da372 240 cache->bytes_super += stripe_len;
11833d66
YZ
241 ret = add_excluded_extent(root, logical[nr],
242 stripe_len);
243 BUG_ON(ret);
817d52f8 244 }
11833d66 245
817d52f8
JB
246 kfree(logical);
247 }
817d52f8
JB
248 return 0;
249}
250
11833d66
YZ
251static struct btrfs_caching_control *
252get_caching_control(struct btrfs_block_group_cache *cache)
253{
254 struct btrfs_caching_control *ctl;
255
256 spin_lock(&cache->lock);
257 if (cache->cached != BTRFS_CACHE_STARTED) {
258 spin_unlock(&cache->lock);
259 return NULL;
260 }
261
dde5abee
JB
262 /* We're loading it the fast way, so we don't have a caching_ctl. */
263 if (!cache->caching_ctl) {
264 spin_unlock(&cache->lock);
11833d66
YZ
265 return NULL;
266 }
267
268 ctl = cache->caching_ctl;
269 atomic_inc(&ctl->count);
270 spin_unlock(&cache->lock);
271 return ctl;
272}
273
274static void put_caching_control(struct btrfs_caching_control *ctl)
275{
276 if (atomic_dec_and_test(&ctl->count))
277 kfree(ctl);
278}
279
0f9dd46c
JB
280/*
281 * this is only called by cache_block_group, since we could have freed extents
282 * we need to check the pinned_extents for any extents that can't be used yet
283 * since their free space will be released as soon as the transaction commits.
284 */
817d52f8 285static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
0f9dd46c
JB
286 struct btrfs_fs_info *info, u64 start, u64 end)
287{
817d52f8 288 u64 extent_start, extent_end, size, total_added = 0;
0f9dd46c
JB
289 int ret;
290
291 while (start < end) {
11833d66 292 ret = find_first_extent_bit(info->pinned_extents, start,
0f9dd46c 293 &extent_start, &extent_end,
11833d66 294 EXTENT_DIRTY | EXTENT_UPTODATE);
0f9dd46c
JB
295 if (ret)
296 break;
297
06b2331f 298 if (extent_start <= start) {
0f9dd46c
JB
299 start = extent_end + 1;
300 } else if (extent_start > start && extent_start < end) {
301 size = extent_start - start;
817d52f8 302 total_added += size;
ea6a478e
JB
303 ret = btrfs_add_free_space(block_group, start,
304 size);
0f9dd46c
JB
305 BUG_ON(ret);
306 start = extent_end + 1;
307 } else {
308 break;
309 }
310 }
311
312 if (start < end) {
313 size = end - start;
817d52f8 314 total_added += size;
ea6a478e 315 ret = btrfs_add_free_space(block_group, start, size);
0f9dd46c
JB
316 BUG_ON(ret);
317 }
318
817d52f8 319 return total_added;
0f9dd46c
JB
320}
321
817d52f8 322static int caching_kthread(void *data)
e37c9e69 323{
817d52f8
JB
324 struct btrfs_block_group_cache *block_group = data;
325 struct btrfs_fs_info *fs_info = block_group->fs_info;
11833d66
YZ
326 struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
327 struct btrfs_root *extent_root = fs_info->extent_root;
e37c9e69 328 struct btrfs_path *path;
5f39d397 329 struct extent_buffer *leaf;
11833d66 330 struct btrfs_key key;
817d52f8 331 u64 total_found = 0;
11833d66
YZ
332 u64 last = 0;
333 u32 nritems;
334 int ret = 0;
f510cfec 335
e37c9e69
CM
336 path = btrfs_alloc_path();
337 if (!path)
338 return -ENOMEM;
7d7d6068 339
817d52f8 340 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
11833d66 341
5cd57b2c 342 /*
817d52f8
JB
343 * We don't want to deadlock with somebody trying to allocate a new
344 * extent for the extent root while also trying to search the extent
345 * root to add free space. So we skip locking and search the commit
346 * root, since its read-only
5cd57b2c
CM
347 */
348 path->skip_locking = 1;
817d52f8
JB
349 path->search_commit_root = 1;
350 path->reada = 2;
351
e4404d6e 352 key.objectid = last;
e37c9e69 353 key.offset = 0;
11833d66 354 key.type = BTRFS_EXTENT_ITEM_KEY;
013f1b12 355again:
11833d66 356 mutex_lock(&caching_ctl->mutex);
013f1b12
CM
357 /* need to make sure the commit_root doesn't disappear */
358 down_read(&fs_info->extent_commit_sem);
359
11833d66 360 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
e37c9e69 361 if (ret < 0)
ef8bbdfe 362 goto err;
a512bbf8 363
11833d66
YZ
364 leaf = path->nodes[0];
365 nritems = btrfs_header_nritems(leaf);
366
d397712b 367 while (1) {
817d52f8 368 smp_mb();
11833d66 369 if (fs_info->closing > 1) {
f25784b3 370 last = (u64)-1;
817d52f8 371 break;
f25784b3 372 }
817d52f8 373
11833d66
YZ
374 if (path->slots[0] < nritems) {
375 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
376 } else {
377 ret = find_next_key(path, 0, &key);
378 if (ret)
e37c9e69 379 break;
817d52f8 380
589d8ade
JB
381 if (need_resched() ||
382 btrfs_next_leaf(extent_root, path)) {
383 caching_ctl->progress = last;
384 btrfs_release_path(extent_root, path);
385 up_read(&fs_info->extent_commit_sem);
386 mutex_unlock(&caching_ctl->mutex);
11833d66 387 cond_resched();
589d8ade
JB
388 goto again;
389 }
390 leaf = path->nodes[0];
391 nritems = btrfs_header_nritems(leaf);
392 continue;
11833d66 393 }
817d52f8 394
11833d66
YZ
395 if (key.objectid < block_group->key.objectid) {
396 path->slots[0]++;
817d52f8 397 continue;
e37c9e69 398 }
0f9dd46c 399
e37c9e69 400 if (key.objectid >= block_group->key.objectid +
0f9dd46c 401 block_group->key.offset)
e37c9e69 402 break;
7d7d6068 403
11833d66 404 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
817d52f8
JB
405 total_found += add_new_free_space(block_group,
406 fs_info, last,
407 key.objectid);
7d7d6068 408 last = key.objectid + key.offset;
817d52f8 409
11833d66
YZ
410 if (total_found > (1024 * 1024 * 2)) {
411 total_found = 0;
412 wake_up(&caching_ctl->wait);
413 }
817d52f8 414 }
e37c9e69
CM
415 path->slots[0]++;
416 }
817d52f8 417 ret = 0;
e37c9e69 418
817d52f8
JB
419 total_found += add_new_free_space(block_group, fs_info, last,
420 block_group->key.objectid +
421 block_group->key.offset);
11833d66 422 caching_ctl->progress = (u64)-1;
817d52f8
JB
423
424 spin_lock(&block_group->lock);
11833d66 425 block_group->caching_ctl = NULL;
817d52f8
JB
426 block_group->cached = BTRFS_CACHE_FINISHED;
427 spin_unlock(&block_group->lock);
0f9dd46c 428
54aa1f4d 429err:
e37c9e69 430 btrfs_free_path(path);
276e680d 431 up_read(&fs_info->extent_commit_sem);
817d52f8 432
11833d66
YZ
433 free_excluded_extents(extent_root, block_group);
434
435 mutex_unlock(&caching_ctl->mutex);
436 wake_up(&caching_ctl->wait);
437
438 put_caching_control(caching_ctl);
439 atomic_dec(&block_group->space_info->caching_threads);
11dfe35a
JB
440 btrfs_put_block_group(block_group);
441
817d52f8
JB
442 return 0;
443}
444
9d66e233
JB
445static int cache_block_group(struct btrfs_block_group_cache *cache,
446 struct btrfs_trans_handle *trans,
b8399dee 447 struct btrfs_root *root,
9d66e233 448 int load_cache_only)
817d52f8 449{
11833d66
YZ
450 struct btrfs_fs_info *fs_info = cache->fs_info;
451 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
452 struct task_struct *tsk;
453 int ret = 0;
454
11833d66
YZ
455 smp_mb();
456 if (cache->cached != BTRFS_CACHE_NO)
457 return 0;
458
9d66e233
JB
459 /*
460 * We can't do the read from on-disk cache during a commit since we need
b8399dee
JB
461 * to have the normal tree locking. Also if we are currently trying to
462 * allocate blocks for the tree root we can't do the fast caching since
463 * we likely hold important locks.
9d66e233 464 */
f7039b1d 465 if (trans && (!trans->transaction->in_commit) &&
b8399dee 466 (root && root != root->fs_info->tree_root)) {
9d66e233
JB
467 spin_lock(&cache->lock);
468 if (cache->cached != BTRFS_CACHE_NO) {
469 spin_unlock(&cache->lock);
470 return 0;
471 }
472 cache->cached = BTRFS_CACHE_STARTED;
473 spin_unlock(&cache->lock);
474
475 ret = load_free_space_cache(fs_info, cache);
476
477 spin_lock(&cache->lock);
478 if (ret == 1) {
479 cache->cached = BTRFS_CACHE_FINISHED;
480 cache->last_byte_to_unpin = (u64)-1;
481 } else {
482 cache->cached = BTRFS_CACHE_NO;
483 }
484 spin_unlock(&cache->lock);
3c14874a
JB
485 if (ret == 1) {
486 free_excluded_extents(fs_info->extent_root, cache);
9d66e233 487 return 0;
3c14874a 488 }
9d66e233
JB
489 }
490
491 if (load_cache_only)
492 return 0;
493
fc0e4a31 494 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
11833d66
YZ
495 BUG_ON(!caching_ctl);
496
497 INIT_LIST_HEAD(&caching_ctl->list);
498 mutex_init(&caching_ctl->mutex);
499 init_waitqueue_head(&caching_ctl->wait);
500 caching_ctl->block_group = cache;
501 caching_ctl->progress = cache->key.objectid;
502 /* one for caching kthread, one for caching block group list */
503 atomic_set(&caching_ctl->count, 2);
504
817d52f8
JB
505 spin_lock(&cache->lock);
506 if (cache->cached != BTRFS_CACHE_NO) {
507 spin_unlock(&cache->lock);
11833d66
YZ
508 kfree(caching_ctl);
509 return 0;
817d52f8 510 }
11833d66 511 cache->caching_ctl = caching_ctl;
817d52f8
JB
512 cache->cached = BTRFS_CACHE_STARTED;
513 spin_unlock(&cache->lock);
514
11833d66
YZ
515 down_write(&fs_info->extent_commit_sem);
516 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
517 up_write(&fs_info->extent_commit_sem);
518
519 atomic_inc(&cache->space_info->caching_threads);
11dfe35a 520 btrfs_get_block_group(cache);
11833d66 521
817d52f8
JB
522 tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
523 cache->key.objectid);
524 if (IS_ERR(tsk)) {
525 ret = PTR_ERR(tsk);
526 printk(KERN_ERR "error running thread %d\n", ret);
527 BUG();
528 }
529
ef8bbdfe 530 return ret;
e37c9e69
CM
531}
532
0f9dd46c
JB
533/*
534 * return the block group that starts at or after bytenr
535 */
d397712b
CM
536static struct btrfs_block_group_cache *
537btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
0ef3e66b 538{
0f9dd46c 539 struct btrfs_block_group_cache *cache;
0ef3e66b 540
0f9dd46c 541 cache = block_group_cache_tree_search(info, bytenr, 0);
0ef3e66b 542
0f9dd46c 543 return cache;
0ef3e66b
CM
544}
545
0f9dd46c 546/*
9f55684c 547 * return the block group that contains the given bytenr
0f9dd46c 548 */
d397712b
CM
549struct btrfs_block_group_cache *btrfs_lookup_block_group(
550 struct btrfs_fs_info *info,
551 u64 bytenr)
be744175 552{
0f9dd46c 553 struct btrfs_block_group_cache *cache;
be744175 554
0f9dd46c 555 cache = block_group_cache_tree_search(info, bytenr, 1);
96b5179d 556
0f9dd46c 557 return cache;
be744175 558}
0b86a832 559
0f9dd46c
JB
560static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
561 u64 flags)
6324fbf3 562{
0f9dd46c 563 struct list_head *head = &info->space_info;
0f9dd46c 564 struct btrfs_space_info *found;
4184ea7f 565
b742bb82
YZ
566 flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
567 BTRFS_BLOCK_GROUP_METADATA;
568
4184ea7f
CM
569 rcu_read_lock();
570 list_for_each_entry_rcu(found, head, list) {
67377734 571 if (found->flags & flags) {
4184ea7f 572 rcu_read_unlock();
0f9dd46c 573 return found;
4184ea7f 574 }
0f9dd46c 575 }
4184ea7f 576 rcu_read_unlock();
0f9dd46c 577 return NULL;
6324fbf3
CM
578}
579
4184ea7f
CM
580/*
581 * after adding space to the filesystem, we need to clear the full flags
582 * on all the space infos.
583 */
584void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
585{
586 struct list_head *head = &info->space_info;
587 struct btrfs_space_info *found;
588
589 rcu_read_lock();
590 list_for_each_entry_rcu(found, head, list)
591 found->full = 0;
592 rcu_read_unlock();
593}
594
80eb234a
JB
595static u64 div_factor(u64 num, int factor)
596{
597 if (factor == 10)
598 return num;
599 num *= factor;
600 do_div(num, 10);
601 return num;
602}
603
e5bc2458
CM
604static u64 div_factor_fine(u64 num, int factor)
605{
606 if (factor == 100)
607 return num;
608 num *= factor;
609 do_div(num, 100);
610 return num;
611}
612
d2fb3437
YZ
613u64 btrfs_find_block_group(struct btrfs_root *root,
614 u64 search_start, u64 search_hint, int owner)
cd1bc465 615{
96b5179d 616 struct btrfs_block_group_cache *cache;
cd1bc465 617 u64 used;
d2fb3437
YZ
618 u64 last = max(search_hint, search_start);
619 u64 group_start = 0;
31f3c99b 620 int full_search = 0;
d2fb3437 621 int factor = 9;
0ef3e66b 622 int wrapped = 0;
31f3c99b 623again:
e8569813
ZY
624 while (1) {
625 cache = btrfs_lookup_first_block_group(root->fs_info, last);
0f9dd46c
JB
626 if (!cache)
627 break;
96b5179d 628
c286ac48 629 spin_lock(&cache->lock);
96b5179d
CM
630 last = cache->key.objectid + cache->key.offset;
631 used = btrfs_block_group_used(&cache->item);
632
d2fb3437
YZ
633 if ((full_search || !cache->ro) &&
634 block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
e8569813 635 if (used + cache->pinned + cache->reserved <
d2fb3437
YZ
636 div_factor(cache->key.offset, factor)) {
637 group_start = cache->key.objectid;
c286ac48 638 spin_unlock(&cache->lock);
fa9c0d79 639 btrfs_put_block_group(cache);
8790d502
CM
640 goto found;
641 }
6324fbf3 642 }
c286ac48 643 spin_unlock(&cache->lock);
fa9c0d79 644 btrfs_put_block_group(cache);
de428b63 645 cond_resched();
cd1bc465 646 }
0ef3e66b
CM
647 if (!wrapped) {
648 last = search_start;
649 wrapped = 1;
650 goto again;
651 }
652 if (!full_search && factor < 10) {
be744175 653 last = search_start;
31f3c99b 654 full_search = 1;
0ef3e66b 655 factor = 10;
31f3c99b
CM
656 goto again;
657 }
be744175 658found:
d2fb3437 659 return group_start;
925baedd 660}
0f9dd46c 661
e02119d5 662/* simple helper to search for an existing extent at a given offset */
31840ae1 663int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
e02119d5
CM
664{
665 int ret;
666 struct btrfs_key key;
31840ae1 667 struct btrfs_path *path;
e02119d5 668
31840ae1
ZY
669 path = btrfs_alloc_path();
670 BUG_ON(!path);
e02119d5
CM
671 key.objectid = start;
672 key.offset = len;
673 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
674 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
675 0, 0);
31840ae1 676 btrfs_free_path(path);
7bb86316
CM
677 return ret;
678}
679
a22285a6
YZ
680/*
681 * helper function to lookup reference count and flags of extent.
682 *
683 * the head node for delayed ref is used to store the sum of all the
684 * reference count modifications queued up in the rbtree. the head
685 * node may also store the extent flags to set. This way you can check
686 * to see what the reference count and extent flags would be if all of
687 * the delayed refs are not processed.
688 */
689int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
690 struct btrfs_root *root, u64 bytenr,
691 u64 num_bytes, u64 *refs, u64 *flags)
692{
693 struct btrfs_delayed_ref_head *head;
694 struct btrfs_delayed_ref_root *delayed_refs;
695 struct btrfs_path *path;
696 struct btrfs_extent_item *ei;
697 struct extent_buffer *leaf;
698 struct btrfs_key key;
699 u32 item_size;
700 u64 num_refs;
701 u64 extent_flags;
702 int ret;
703
704 path = btrfs_alloc_path();
705 if (!path)
706 return -ENOMEM;
707
708 key.objectid = bytenr;
709 key.type = BTRFS_EXTENT_ITEM_KEY;
710 key.offset = num_bytes;
711 if (!trans) {
712 path->skip_locking = 1;
713 path->search_commit_root = 1;
714 }
715again:
716 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
717 &key, path, 0, 0);
718 if (ret < 0)
719 goto out_free;
720
721 if (ret == 0) {
722 leaf = path->nodes[0];
723 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
724 if (item_size >= sizeof(*ei)) {
725 ei = btrfs_item_ptr(leaf, path->slots[0],
726 struct btrfs_extent_item);
727 num_refs = btrfs_extent_refs(leaf, ei);
728 extent_flags = btrfs_extent_flags(leaf, ei);
729 } else {
730#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
731 struct btrfs_extent_item_v0 *ei0;
732 BUG_ON(item_size != sizeof(*ei0));
733 ei0 = btrfs_item_ptr(leaf, path->slots[0],
734 struct btrfs_extent_item_v0);
735 num_refs = btrfs_extent_refs_v0(leaf, ei0);
736 /* FIXME: this isn't correct for data */
737 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
738#else
739 BUG();
740#endif
741 }
742 BUG_ON(num_refs == 0);
743 } else {
744 num_refs = 0;
745 extent_flags = 0;
746 ret = 0;
747 }
748
749 if (!trans)
750 goto out;
751
752 delayed_refs = &trans->transaction->delayed_refs;
753 spin_lock(&delayed_refs->lock);
754 head = btrfs_find_delayed_ref_head(trans, bytenr);
755 if (head) {
756 if (!mutex_trylock(&head->mutex)) {
757 atomic_inc(&head->node.refs);
758 spin_unlock(&delayed_refs->lock);
759
760 btrfs_release_path(root->fs_info->extent_root, path);
761
762 mutex_lock(&head->mutex);
763 mutex_unlock(&head->mutex);
764 btrfs_put_delayed_ref(&head->node);
765 goto again;
766 }
767 if (head->extent_op && head->extent_op->update_flags)
768 extent_flags |= head->extent_op->flags_to_set;
769 else
770 BUG_ON(num_refs == 0);
771
772 num_refs += head->node.ref_mod;
773 mutex_unlock(&head->mutex);
774 }
775 spin_unlock(&delayed_refs->lock);
776out:
777 WARN_ON(num_refs == 0);
778 if (refs)
779 *refs = num_refs;
780 if (flags)
781 *flags = extent_flags;
782out_free:
783 btrfs_free_path(path);
784 return ret;
785}
786
d8d5f3e1
CM
787/*
788 * Back reference rules. Back refs have three main goals:
789 *
790 * 1) differentiate between all holders of references to an extent so that
791 * when a reference is dropped we can make sure it was a valid reference
792 * before freeing the extent.
793 *
794 * 2) Provide enough information to quickly find the holders of an extent
795 * if we notice a given block is corrupted or bad.
796 *
797 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
798 * maintenance. This is actually the same as #2, but with a slightly
799 * different use case.
800 *
5d4f98a2
YZ
801 * There are two kinds of back refs. The implicit back refs is optimized
802 * for pointers in non-shared tree blocks. For a given pointer in a block,
803 * back refs of this kind provide information about the block's owner tree
804 * and the pointer's key. These information allow us to find the block by
805 * b-tree searching. The full back refs is for pointers in tree blocks not
806 * referenced by their owner trees. The location of tree block is recorded
807 * in the back refs. Actually the full back refs is generic, and can be
808 * used in all cases the implicit back refs is used. The major shortcoming
809 * of the full back refs is its overhead. Every time a tree block gets
810 * COWed, we have to update back refs entry for all pointers in it.
811 *
812 * For a newly allocated tree block, we use implicit back refs for
813 * pointers in it. This means most tree related operations only involve
814 * implicit back refs. For a tree block created in old transaction, the
815 * only way to drop a reference to it is COW it. So we can detect the
816 * event that tree block loses its owner tree's reference and do the
817 * back refs conversion.
818 *
819 * When a tree block is COW'd through a tree, there are four cases:
820 *
821 * The reference count of the block is one and the tree is the block's
822 * owner tree. Nothing to do in this case.
823 *
824 * The reference count of the block is one and the tree is not the
825 * block's owner tree. In this case, full back refs is used for pointers
826 * in the block. Remove these full back refs, add implicit back refs for
827 * every pointers in the new block.
828 *
829 * The reference count of the block is greater than one and the tree is
830 * the block's owner tree. In this case, implicit back refs is used for
831 * pointers in the block. Add full back refs for every pointers in the
832 * block, increase lower level extents' reference counts. The original
833 * implicit back refs are entailed to the new block.
834 *
835 * The reference count of the block is greater than one and the tree is
836 * not the block's owner tree. Add implicit back refs for every pointer in
837 * the new block, increase lower level extents' reference count.
838 *
839 * Back Reference Key composing:
840 *
841 * The key objectid corresponds to the first byte in the extent,
842 * The key type is used to differentiate between types of back refs.
843 * There are different meanings of the key offset for different types
844 * of back refs.
845 *
d8d5f3e1
CM
846 * File extents can be referenced by:
847 *
848 * - multiple snapshots, subvolumes, or different generations in one subvol
31840ae1 849 * - different files inside a single subvolume
d8d5f3e1
CM
850 * - different offsets inside a file (bookend extents in file.c)
851 *
5d4f98a2 852 * The extent ref structure for the implicit back refs has fields for:
d8d5f3e1
CM
853 *
854 * - Objectid of the subvolume root
d8d5f3e1 855 * - objectid of the file holding the reference
5d4f98a2
YZ
856 * - original offset in the file
857 * - how many bookend extents
d8d5f3e1 858 *
5d4f98a2
YZ
859 * The key offset for the implicit back refs is hash of the first
860 * three fields.
d8d5f3e1 861 *
5d4f98a2 862 * The extent ref structure for the full back refs has field for:
d8d5f3e1 863 *
5d4f98a2 864 * - number of pointers in the tree leaf
d8d5f3e1 865 *
5d4f98a2
YZ
866 * The key offset for the implicit back refs is the first byte of
867 * the tree leaf
d8d5f3e1 868 *
5d4f98a2
YZ
869 * When a file extent is allocated, The implicit back refs is used.
870 * the fields are filled in:
d8d5f3e1 871 *
5d4f98a2 872 * (root_key.objectid, inode objectid, offset in file, 1)
d8d5f3e1 873 *
5d4f98a2
YZ
874 * When a file extent is removed file truncation, we find the
875 * corresponding implicit back refs and check the following fields:
d8d5f3e1 876 *
5d4f98a2 877 * (btrfs_header_owner(leaf), inode objectid, offset in file)
d8d5f3e1 878 *
5d4f98a2 879 * Btree extents can be referenced by:
d8d5f3e1 880 *
5d4f98a2 881 * - Different subvolumes
d8d5f3e1 882 *
5d4f98a2
YZ
883 * Both the implicit back refs and the full back refs for tree blocks
884 * only consist of key. The key offset for the implicit back refs is
885 * objectid of block's owner tree. The key offset for the full back refs
886 * is the first byte of parent block.
d8d5f3e1 887 *
5d4f98a2
YZ
888 * When implicit back refs is used, information about the lowest key and
889 * level of the tree block are required. These information are stored in
890 * tree block info structure.
d8d5f3e1 891 */
31840ae1 892
5d4f98a2
YZ
893#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
894static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
895 struct btrfs_root *root,
896 struct btrfs_path *path,
897 u64 owner, u32 extra_size)
7bb86316 898{
5d4f98a2
YZ
899 struct btrfs_extent_item *item;
900 struct btrfs_extent_item_v0 *ei0;
901 struct btrfs_extent_ref_v0 *ref0;
902 struct btrfs_tree_block_info *bi;
903 struct extent_buffer *leaf;
7bb86316 904 struct btrfs_key key;
5d4f98a2
YZ
905 struct btrfs_key found_key;
906 u32 new_size = sizeof(*item);
907 u64 refs;
908 int ret;
909
910 leaf = path->nodes[0];
911 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
912
913 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
914 ei0 = btrfs_item_ptr(leaf, path->slots[0],
915 struct btrfs_extent_item_v0);
916 refs = btrfs_extent_refs_v0(leaf, ei0);
917
918 if (owner == (u64)-1) {
919 while (1) {
920 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
921 ret = btrfs_next_leaf(root, path);
922 if (ret < 0)
923 return ret;
924 BUG_ON(ret > 0);
925 leaf = path->nodes[0];
926 }
927 btrfs_item_key_to_cpu(leaf, &found_key,
928 path->slots[0]);
929 BUG_ON(key.objectid != found_key.objectid);
930 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
931 path->slots[0]++;
932 continue;
933 }
934 ref0 = btrfs_item_ptr(leaf, path->slots[0],
935 struct btrfs_extent_ref_v0);
936 owner = btrfs_ref_objectid_v0(leaf, ref0);
937 break;
938 }
939 }
940 btrfs_release_path(root, path);
941
942 if (owner < BTRFS_FIRST_FREE_OBJECTID)
943 new_size += sizeof(*bi);
944
945 new_size -= sizeof(*ei0);
946 ret = btrfs_search_slot(trans, root, &key, path,
947 new_size + extra_size, 1);
948 if (ret < 0)
949 return ret;
950 BUG_ON(ret);
951
952 ret = btrfs_extend_item(trans, root, path, new_size);
953 BUG_ON(ret);
954
955 leaf = path->nodes[0];
956 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
957 btrfs_set_extent_refs(leaf, item, refs);
958 /* FIXME: get real generation */
959 btrfs_set_extent_generation(leaf, item, 0);
960 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
961 btrfs_set_extent_flags(leaf, item,
962 BTRFS_EXTENT_FLAG_TREE_BLOCK |
963 BTRFS_BLOCK_FLAG_FULL_BACKREF);
964 bi = (struct btrfs_tree_block_info *)(item + 1);
965 /* FIXME: get first key of the block */
966 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
967 btrfs_set_tree_block_level(leaf, bi, (int)owner);
968 } else {
969 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
970 }
971 btrfs_mark_buffer_dirty(leaf);
972 return 0;
973}
974#endif
975
976static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
977{
978 u32 high_crc = ~(u32)0;
979 u32 low_crc = ~(u32)0;
980 __le64 lenum;
981
982 lenum = cpu_to_le64(root_objectid);
163e783e 983 high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
5d4f98a2 984 lenum = cpu_to_le64(owner);
163e783e 985 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
5d4f98a2 986 lenum = cpu_to_le64(offset);
163e783e 987 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
5d4f98a2
YZ
988
989 return ((u64)high_crc << 31) ^ (u64)low_crc;
990}
991
992static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
993 struct btrfs_extent_data_ref *ref)
994{
995 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
996 btrfs_extent_data_ref_objectid(leaf, ref),
997 btrfs_extent_data_ref_offset(leaf, ref));
998}
999
1000static int match_extent_data_ref(struct extent_buffer *leaf,
1001 struct btrfs_extent_data_ref *ref,
1002 u64 root_objectid, u64 owner, u64 offset)
1003{
1004 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1005 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1006 btrfs_extent_data_ref_offset(leaf, ref) != offset)
1007 return 0;
1008 return 1;
1009}
1010
1011static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1012 struct btrfs_root *root,
1013 struct btrfs_path *path,
1014 u64 bytenr, u64 parent,
1015 u64 root_objectid,
1016 u64 owner, u64 offset)
1017{
1018 struct btrfs_key key;
1019 struct btrfs_extent_data_ref *ref;
31840ae1 1020 struct extent_buffer *leaf;
5d4f98a2 1021 u32 nritems;
74493f7a 1022 int ret;
5d4f98a2
YZ
1023 int recow;
1024 int err = -ENOENT;
74493f7a 1025
31840ae1 1026 key.objectid = bytenr;
5d4f98a2
YZ
1027 if (parent) {
1028 key.type = BTRFS_SHARED_DATA_REF_KEY;
1029 key.offset = parent;
1030 } else {
1031 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1032 key.offset = hash_extent_data_ref(root_objectid,
1033 owner, offset);
1034 }
1035again:
1036 recow = 0;
1037 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1038 if (ret < 0) {
1039 err = ret;
1040 goto fail;
1041 }
31840ae1 1042
5d4f98a2
YZ
1043 if (parent) {
1044 if (!ret)
1045 return 0;
1046#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1047 key.type = BTRFS_EXTENT_REF_V0_KEY;
1048 btrfs_release_path(root, path);
1049 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1050 if (ret < 0) {
1051 err = ret;
1052 goto fail;
1053 }
1054 if (!ret)
1055 return 0;
1056#endif
1057 goto fail;
31840ae1
ZY
1058 }
1059
1060 leaf = path->nodes[0];
5d4f98a2
YZ
1061 nritems = btrfs_header_nritems(leaf);
1062 while (1) {
1063 if (path->slots[0] >= nritems) {
1064 ret = btrfs_next_leaf(root, path);
1065 if (ret < 0)
1066 err = ret;
1067 if (ret)
1068 goto fail;
1069
1070 leaf = path->nodes[0];
1071 nritems = btrfs_header_nritems(leaf);
1072 recow = 1;
1073 }
1074
1075 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1076 if (key.objectid != bytenr ||
1077 key.type != BTRFS_EXTENT_DATA_REF_KEY)
1078 goto fail;
1079
1080 ref = btrfs_item_ptr(leaf, path->slots[0],
1081 struct btrfs_extent_data_ref);
1082
1083 if (match_extent_data_ref(leaf, ref, root_objectid,
1084 owner, offset)) {
1085 if (recow) {
1086 btrfs_release_path(root, path);
1087 goto again;
1088 }
1089 err = 0;
1090 break;
1091 }
1092 path->slots[0]++;
31840ae1 1093 }
5d4f98a2
YZ
1094fail:
1095 return err;
31840ae1
ZY
1096}
1097
5d4f98a2
YZ
1098static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1099 struct btrfs_root *root,
1100 struct btrfs_path *path,
1101 u64 bytenr, u64 parent,
1102 u64 root_objectid, u64 owner,
1103 u64 offset, int refs_to_add)
31840ae1
ZY
1104{
1105 struct btrfs_key key;
1106 struct extent_buffer *leaf;
5d4f98a2 1107 u32 size;
31840ae1
ZY
1108 u32 num_refs;
1109 int ret;
74493f7a 1110
74493f7a 1111 key.objectid = bytenr;
5d4f98a2
YZ
1112 if (parent) {
1113 key.type = BTRFS_SHARED_DATA_REF_KEY;
1114 key.offset = parent;
1115 size = sizeof(struct btrfs_shared_data_ref);
1116 } else {
1117 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1118 key.offset = hash_extent_data_ref(root_objectid,
1119 owner, offset);
1120 size = sizeof(struct btrfs_extent_data_ref);
1121 }
74493f7a 1122
5d4f98a2
YZ
1123 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1124 if (ret && ret != -EEXIST)
1125 goto fail;
1126
1127 leaf = path->nodes[0];
1128 if (parent) {
1129 struct btrfs_shared_data_ref *ref;
31840ae1 1130 ref = btrfs_item_ptr(leaf, path->slots[0],
5d4f98a2
YZ
1131 struct btrfs_shared_data_ref);
1132 if (ret == 0) {
1133 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1134 } else {
1135 num_refs = btrfs_shared_data_ref_count(leaf, ref);
1136 num_refs += refs_to_add;
1137 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
31840ae1 1138 }
5d4f98a2
YZ
1139 } else {
1140 struct btrfs_extent_data_ref *ref;
1141 while (ret == -EEXIST) {
1142 ref = btrfs_item_ptr(leaf, path->slots[0],
1143 struct btrfs_extent_data_ref);
1144 if (match_extent_data_ref(leaf, ref, root_objectid,
1145 owner, offset))
1146 break;
1147 btrfs_release_path(root, path);
1148 key.offset++;
1149 ret = btrfs_insert_empty_item(trans, root, path, &key,
1150 size);
1151 if (ret && ret != -EEXIST)
1152 goto fail;
31840ae1 1153
5d4f98a2
YZ
1154 leaf = path->nodes[0];
1155 }
1156 ref = btrfs_item_ptr(leaf, path->slots[0],
1157 struct btrfs_extent_data_ref);
1158 if (ret == 0) {
1159 btrfs_set_extent_data_ref_root(leaf, ref,
1160 root_objectid);
1161 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1162 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1163 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1164 } else {
1165 num_refs = btrfs_extent_data_ref_count(leaf, ref);
1166 num_refs += refs_to_add;
1167 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
31840ae1 1168 }
31840ae1 1169 }
5d4f98a2
YZ
1170 btrfs_mark_buffer_dirty(leaf);
1171 ret = 0;
1172fail:
7bb86316
CM
1173 btrfs_release_path(root, path);
1174 return ret;
74493f7a
CM
1175}
1176
5d4f98a2
YZ
1177static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1178 struct btrfs_root *root,
1179 struct btrfs_path *path,
1180 int refs_to_drop)
31840ae1 1181{
5d4f98a2
YZ
1182 struct btrfs_key key;
1183 struct btrfs_extent_data_ref *ref1 = NULL;
1184 struct btrfs_shared_data_ref *ref2 = NULL;
31840ae1 1185 struct extent_buffer *leaf;
5d4f98a2 1186 u32 num_refs = 0;
31840ae1
ZY
1187 int ret = 0;
1188
1189 leaf = path->nodes[0];
5d4f98a2
YZ
1190 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1191
1192 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1193 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1194 struct btrfs_extent_data_ref);
1195 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1196 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1197 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1198 struct btrfs_shared_data_ref);
1199 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1200#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1201 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1202 struct btrfs_extent_ref_v0 *ref0;
1203 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1204 struct btrfs_extent_ref_v0);
1205 num_refs = btrfs_ref_count_v0(leaf, ref0);
1206#endif
1207 } else {
1208 BUG();
1209 }
1210
56bec294
CM
1211 BUG_ON(num_refs < refs_to_drop);
1212 num_refs -= refs_to_drop;
5d4f98a2 1213
31840ae1
ZY
1214 if (num_refs == 0) {
1215 ret = btrfs_del_item(trans, root, path);
1216 } else {
5d4f98a2
YZ
1217 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1218 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1219 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1220 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1221#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1222 else {
1223 struct btrfs_extent_ref_v0 *ref0;
1224 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1225 struct btrfs_extent_ref_v0);
1226 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1227 }
1228#endif
31840ae1
ZY
1229 btrfs_mark_buffer_dirty(leaf);
1230 }
31840ae1
ZY
1231 return ret;
1232}
1233
5d4f98a2
YZ
1234static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1235 struct btrfs_path *path,
1236 struct btrfs_extent_inline_ref *iref)
15916de8 1237{
5d4f98a2
YZ
1238 struct btrfs_key key;
1239 struct extent_buffer *leaf;
1240 struct btrfs_extent_data_ref *ref1;
1241 struct btrfs_shared_data_ref *ref2;
1242 u32 num_refs = 0;
1243
1244 leaf = path->nodes[0];
1245 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1246 if (iref) {
1247 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1248 BTRFS_EXTENT_DATA_REF_KEY) {
1249 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1250 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1251 } else {
1252 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1253 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1254 }
1255 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1256 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1257 struct btrfs_extent_data_ref);
1258 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1259 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1260 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1261 struct btrfs_shared_data_ref);
1262 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1263#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1264 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1265 struct btrfs_extent_ref_v0 *ref0;
1266 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1267 struct btrfs_extent_ref_v0);
1268 num_refs = btrfs_ref_count_v0(leaf, ref0);
4b4e25f2 1269#endif
5d4f98a2
YZ
1270 } else {
1271 WARN_ON(1);
1272 }
1273 return num_refs;
1274}
15916de8 1275
5d4f98a2
YZ
1276static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1277 struct btrfs_root *root,
1278 struct btrfs_path *path,
1279 u64 bytenr, u64 parent,
1280 u64 root_objectid)
1f3c79a2 1281{
5d4f98a2 1282 struct btrfs_key key;
1f3c79a2 1283 int ret;
1f3c79a2 1284
5d4f98a2
YZ
1285 key.objectid = bytenr;
1286 if (parent) {
1287 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1288 key.offset = parent;
1289 } else {
1290 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1291 key.offset = root_objectid;
1f3c79a2
LH
1292 }
1293
5d4f98a2
YZ
1294 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1295 if (ret > 0)
1296 ret = -ENOENT;
1297#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1298 if (ret == -ENOENT && parent) {
1299 btrfs_release_path(root, path);
1300 key.type = BTRFS_EXTENT_REF_V0_KEY;
1301 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1302 if (ret > 0)
1303 ret = -ENOENT;
1304 }
1f3c79a2 1305#endif
5d4f98a2 1306 return ret;
1f3c79a2
LH
1307}
1308
5d4f98a2
YZ
1309static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1310 struct btrfs_root *root,
1311 struct btrfs_path *path,
1312 u64 bytenr, u64 parent,
1313 u64 root_objectid)
31840ae1 1314{
5d4f98a2 1315 struct btrfs_key key;
31840ae1 1316 int ret;
31840ae1 1317
5d4f98a2
YZ
1318 key.objectid = bytenr;
1319 if (parent) {
1320 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1321 key.offset = parent;
1322 } else {
1323 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1324 key.offset = root_objectid;
1325 }
1326
1327 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1328 btrfs_release_path(root, path);
31840ae1
ZY
1329 return ret;
1330}
1331
5d4f98a2 1332static inline int extent_ref_type(u64 parent, u64 owner)
31840ae1 1333{
5d4f98a2
YZ
1334 int type;
1335 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1336 if (parent > 0)
1337 type = BTRFS_SHARED_BLOCK_REF_KEY;
1338 else
1339 type = BTRFS_TREE_BLOCK_REF_KEY;
1340 } else {
1341 if (parent > 0)
1342 type = BTRFS_SHARED_DATA_REF_KEY;
1343 else
1344 type = BTRFS_EXTENT_DATA_REF_KEY;
1345 }
1346 return type;
31840ae1 1347}
56bec294 1348
2c47e605
YZ
1349static int find_next_key(struct btrfs_path *path, int level,
1350 struct btrfs_key *key)
56bec294 1351
02217ed2 1352{
2c47e605 1353 for (; level < BTRFS_MAX_LEVEL; level++) {
5d4f98a2
YZ
1354 if (!path->nodes[level])
1355 break;
5d4f98a2
YZ
1356 if (path->slots[level] + 1 >=
1357 btrfs_header_nritems(path->nodes[level]))
1358 continue;
1359 if (level == 0)
1360 btrfs_item_key_to_cpu(path->nodes[level], key,
1361 path->slots[level] + 1);
1362 else
1363 btrfs_node_key_to_cpu(path->nodes[level], key,
1364 path->slots[level] + 1);
1365 return 0;
1366 }
1367 return 1;
1368}
037e6390 1369
5d4f98a2
YZ
1370/*
1371 * look for inline back ref. if back ref is found, *ref_ret is set
1372 * to the address of inline back ref, and 0 is returned.
1373 *
1374 * if back ref isn't found, *ref_ret is set to the address where it
1375 * should be inserted, and -ENOENT is returned.
1376 *
1377 * if insert is true and there are too many inline back refs, the path
1378 * points to the extent item, and -EAGAIN is returned.
1379 *
1380 * NOTE: inline back refs are ordered in the same way that back ref
1381 * items in the tree are ordered.
1382 */
1383static noinline_for_stack
1384int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1385 struct btrfs_root *root,
1386 struct btrfs_path *path,
1387 struct btrfs_extent_inline_ref **ref_ret,
1388 u64 bytenr, u64 num_bytes,
1389 u64 parent, u64 root_objectid,
1390 u64 owner, u64 offset, int insert)
1391{
1392 struct btrfs_key key;
1393 struct extent_buffer *leaf;
1394 struct btrfs_extent_item *ei;
1395 struct btrfs_extent_inline_ref *iref;
1396 u64 flags;
1397 u64 item_size;
1398 unsigned long ptr;
1399 unsigned long end;
1400 int extra_size;
1401 int type;
1402 int want;
1403 int ret;
1404 int err = 0;
26b8003f 1405
db94535d 1406 key.objectid = bytenr;
31840ae1 1407 key.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 1408 key.offset = num_bytes;
31840ae1 1409
5d4f98a2
YZ
1410 want = extent_ref_type(parent, owner);
1411 if (insert) {
1412 extra_size = btrfs_extent_inline_ref_size(want);
85d4198e 1413 path->keep_locks = 1;
5d4f98a2
YZ
1414 } else
1415 extra_size = -1;
1416 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
b9473439 1417 if (ret < 0) {
5d4f98a2
YZ
1418 err = ret;
1419 goto out;
1420 }
1421 BUG_ON(ret);
1422
1423 leaf = path->nodes[0];
1424 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1425#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1426 if (item_size < sizeof(*ei)) {
1427 if (!insert) {
1428 err = -ENOENT;
1429 goto out;
1430 }
1431 ret = convert_extent_item_v0(trans, root, path, owner,
1432 extra_size);
1433 if (ret < 0) {
1434 err = ret;
1435 goto out;
1436 }
1437 leaf = path->nodes[0];
1438 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1439 }
1440#endif
1441 BUG_ON(item_size < sizeof(*ei));
1442
5d4f98a2
YZ
1443 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1444 flags = btrfs_extent_flags(leaf, ei);
1445
1446 ptr = (unsigned long)(ei + 1);
1447 end = (unsigned long)ei + item_size;
1448
1449 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1450 ptr += sizeof(struct btrfs_tree_block_info);
1451 BUG_ON(ptr > end);
1452 } else {
1453 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1454 }
1455
1456 err = -ENOENT;
1457 while (1) {
1458 if (ptr >= end) {
1459 WARN_ON(ptr > end);
1460 break;
1461 }
1462 iref = (struct btrfs_extent_inline_ref *)ptr;
1463 type = btrfs_extent_inline_ref_type(leaf, iref);
1464 if (want < type)
1465 break;
1466 if (want > type) {
1467 ptr += btrfs_extent_inline_ref_size(type);
1468 continue;
1469 }
1470
1471 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1472 struct btrfs_extent_data_ref *dref;
1473 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1474 if (match_extent_data_ref(leaf, dref, root_objectid,
1475 owner, offset)) {
1476 err = 0;
1477 break;
1478 }
1479 if (hash_extent_data_ref_item(leaf, dref) <
1480 hash_extent_data_ref(root_objectid, owner, offset))
1481 break;
1482 } else {
1483 u64 ref_offset;
1484 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1485 if (parent > 0) {
1486 if (parent == ref_offset) {
1487 err = 0;
1488 break;
1489 }
1490 if (ref_offset < parent)
1491 break;
1492 } else {
1493 if (root_objectid == ref_offset) {
1494 err = 0;
1495 break;
1496 }
1497 if (ref_offset < root_objectid)
1498 break;
1499 }
1500 }
1501 ptr += btrfs_extent_inline_ref_size(type);
1502 }
1503 if (err == -ENOENT && insert) {
1504 if (item_size + extra_size >=
1505 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1506 err = -EAGAIN;
1507 goto out;
1508 }
1509 /*
1510 * To add new inline back ref, we have to make sure
1511 * there is no corresponding back ref item.
1512 * For simplicity, we just do not add new inline back
1513 * ref if there is any kind of item for this block
1514 */
2c47e605
YZ
1515 if (find_next_key(path, 0, &key) == 0 &&
1516 key.objectid == bytenr &&
85d4198e 1517 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
5d4f98a2
YZ
1518 err = -EAGAIN;
1519 goto out;
1520 }
1521 }
1522 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1523out:
85d4198e 1524 if (insert) {
5d4f98a2
YZ
1525 path->keep_locks = 0;
1526 btrfs_unlock_up_safe(path, 1);
1527 }
1528 return err;
1529}
1530
1531/*
1532 * helper to add new inline back ref
1533 */
1534static noinline_for_stack
1535int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1536 struct btrfs_root *root,
1537 struct btrfs_path *path,
1538 struct btrfs_extent_inline_ref *iref,
1539 u64 parent, u64 root_objectid,
1540 u64 owner, u64 offset, int refs_to_add,
1541 struct btrfs_delayed_extent_op *extent_op)
1542{
1543 struct extent_buffer *leaf;
1544 struct btrfs_extent_item *ei;
1545 unsigned long ptr;
1546 unsigned long end;
1547 unsigned long item_offset;
1548 u64 refs;
1549 int size;
1550 int type;
1551 int ret;
1552
1553 leaf = path->nodes[0];
1554 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1555 item_offset = (unsigned long)iref - (unsigned long)ei;
1556
1557 type = extent_ref_type(parent, owner);
1558 size = btrfs_extent_inline_ref_size(type);
1559
1560 ret = btrfs_extend_item(trans, root, path, size);
1561 BUG_ON(ret);
1562
1563 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1564 refs = btrfs_extent_refs(leaf, ei);
1565 refs += refs_to_add;
1566 btrfs_set_extent_refs(leaf, ei, refs);
1567 if (extent_op)
1568 __run_delayed_extent_op(extent_op, leaf, ei);
1569
1570 ptr = (unsigned long)ei + item_offset;
1571 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1572 if (ptr < end - size)
1573 memmove_extent_buffer(leaf, ptr + size, ptr,
1574 end - size - ptr);
1575
1576 iref = (struct btrfs_extent_inline_ref *)ptr;
1577 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1578 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1579 struct btrfs_extent_data_ref *dref;
1580 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1581 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1582 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1583 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1584 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1585 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1586 struct btrfs_shared_data_ref *sref;
1587 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1588 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1589 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1590 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1591 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1592 } else {
1593 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1594 }
1595 btrfs_mark_buffer_dirty(leaf);
1596 return 0;
1597}
1598
1599static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1600 struct btrfs_root *root,
1601 struct btrfs_path *path,
1602 struct btrfs_extent_inline_ref **ref_ret,
1603 u64 bytenr, u64 num_bytes, u64 parent,
1604 u64 root_objectid, u64 owner, u64 offset)
1605{
1606 int ret;
1607
1608 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1609 bytenr, num_bytes, parent,
1610 root_objectid, owner, offset, 0);
1611 if (ret != -ENOENT)
54aa1f4d 1612 return ret;
5d4f98a2
YZ
1613
1614 btrfs_release_path(root, path);
1615 *ref_ret = NULL;
1616
1617 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1618 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1619 root_objectid);
1620 } else {
1621 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1622 root_objectid, owner, offset);
b9473439 1623 }
5d4f98a2
YZ
1624 return ret;
1625}
31840ae1 1626
5d4f98a2
YZ
1627/*
1628 * helper to update/remove inline back ref
1629 */
1630static noinline_for_stack
1631int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1632 struct btrfs_root *root,
1633 struct btrfs_path *path,
1634 struct btrfs_extent_inline_ref *iref,
1635 int refs_to_mod,
1636 struct btrfs_delayed_extent_op *extent_op)
1637{
1638 struct extent_buffer *leaf;
1639 struct btrfs_extent_item *ei;
1640 struct btrfs_extent_data_ref *dref = NULL;
1641 struct btrfs_shared_data_ref *sref = NULL;
1642 unsigned long ptr;
1643 unsigned long end;
1644 u32 item_size;
1645 int size;
1646 int type;
1647 int ret;
1648 u64 refs;
1649
1650 leaf = path->nodes[0];
1651 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1652 refs = btrfs_extent_refs(leaf, ei);
1653 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1654 refs += refs_to_mod;
1655 btrfs_set_extent_refs(leaf, ei, refs);
1656 if (extent_op)
1657 __run_delayed_extent_op(extent_op, leaf, ei);
1658
1659 type = btrfs_extent_inline_ref_type(leaf, iref);
1660
1661 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1662 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1663 refs = btrfs_extent_data_ref_count(leaf, dref);
1664 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1665 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1666 refs = btrfs_shared_data_ref_count(leaf, sref);
1667 } else {
1668 refs = 1;
1669 BUG_ON(refs_to_mod != -1);
56bec294 1670 }
31840ae1 1671
5d4f98a2
YZ
1672 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1673 refs += refs_to_mod;
1674
1675 if (refs > 0) {
1676 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1677 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1678 else
1679 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1680 } else {
1681 size = btrfs_extent_inline_ref_size(type);
1682 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1683 ptr = (unsigned long)iref;
1684 end = (unsigned long)ei + item_size;
1685 if (ptr + size < end)
1686 memmove_extent_buffer(leaf, ptr, ptr + size,
1687 end - ptr - size);
1688 item_size -= size;
1689 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1690 BUG_ON(ret);
1691 }
1692 btrfs_mark_buffer_dirty(leaf);
1693 return 0;
1694}
1695
1696static noinline_for_stack
1697int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1698 struct btrfs_root *root,
1699 struct btrfs_path *path,
1700 u64 bytenr, u64 num_bytes, u64 parent,
1701 u64 root_objectid, u64 owner,
1702 u64 offset, int refs_to_add,
1703 struct btrfs_delayed_extent_op *extent_op)
1704{
1705 struct btrfs_extent_inline_ref *iref;
1706 int ret;
1707
1708 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1709 bytenr, num_bytes, parent,
1710 root_objectid, owner, offset, 1);
1711 if (ret == 0) {
1712 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1713 ret = update_inline_extent_backref(trans, root, path, iref,
1714 refs_to_add, extent_op);
1715 } else if (ret == -ENOENT) {
1716 ret = setup_inline_extent_backref(trans, root, path, iref,
1717 parent, root_objectid,
1718 owner, offset, refs_to_add,
1719 extent_op);
771ed689 1720 }
5d4f98a2
YZ
1721 return ret;
1722}
31840ae1 1723
5d4f98a2
YZ
1724static int insert_extent_backref(struct btrfs_trans_handle *trans,
1725 struct btrfs_root *root,
1726 struct btrfs_path *path,
1727 u64 bytenr, u64 parent, u64 root_objectid,
1728 u64 owner, u64 offset, int refs_to_add)
1729{
1730 int ret;
1731 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1732 BUG_ON(refs_to_add != 1);
1733 ret = insert_tree_block_ref(trans, root, path, bytenr,
1734 parent, root_objectid);
1735 } else {
1736 ret = insert_extent_data_ref(trans, root, path, bytenr,
1737 parent, root_objectid,
1738 owner, offset, refs_to_add);
1739 }
1740 return ret;
1741}
56bec294 1742
5d4f98a2
YZ
1743static int remove_extent_backref(struct btrfs_trans_handle *trans,
1744 struct btrfs_root *root,
1745 struct btrfs_path *path,
1746 struct btrfs_extent_inline_ref *iref,
1747 int refs_to_drop, int is_data)
1748{
1749 int ret;
b9473439 1750
5d4f98a2
YZ
1751 BUG_ON(!is_data && refs_to_drop != 1);
1752 if (iref) {
1753 ret = update_inline_extent_backref(trans, root, path, iref,
1754 -refs_to_drop, NULL);
1755 } else if (is_data) {
1756 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1757 } else {
1758 ret = btrfs_del_item(trans, root, path);
1759 }
1760 return ret;
1761}
1762
5378e607 1763static int btrfs_issue_discard(struct block_device *bdev,
5d4f98a2
YZ
1764 u64 start, u64 len)
1765{
5378e607 1766 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
5d4f98a2 1767}
5d4f98a2
YZ
1768
1769static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
5378e607 1770 u64 num_bytes, u64 *actual_bytes)
5d4f98a2 1771{
5d4f98a2 1772 int ret;
5378e607 1773 u64 discarded_bytes = 0;
5d4f98a2
YZ
1774 struct btrfs_multi_bio *multi = NULL;
1775
e244a0ae 1776
5d4f98a2 1777 /* Tell the block device(s) that the sectors can be discarded */
5378e607
LD
1778 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1779 bytenr, &num_bytes, &multi, 0);
5d4f98a2
YZ
1780 if (!ret) {
1781 struct btrfs_bio_stripe *stripe = multi->stripes;
1782 int i;
1783
5d4f98a2
YZ
1784
1785 for (i = 0; i < multi->num_stripes; i++, stripe++) {
5378e607
LD
1786 ret = btrfs_issue_discard(stripe->dev->bdev,
1787 stripe->physical,
1788 stripe->length);
1789 if (!ret)
1790 discarded_bytes += stripe->length;
1791 else if (ret != -EOPNOTSUPP)
1792 break;
5d4f98a2
YZ
1793 }
1794 kfree(multi);
1795 }
5378e607
LD
1796 if (discarded_bytes && ret == -EOPNOTSUPP)
1797 ret = 0;
1798
1799 if (actual_bytes)
1800 *actual_bytes = discarded_bytes;
1801
5d4f98a2
YZ
1802
1803 return ret;
5d4f98a2
YZ
1804}
1805
1806int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1807 struct btrfs_root *root,
1808 u64 bytenr, u64 num_bytes, u64 parent,
1809 u64 root_objectid, u64 owner, u64 offset)
1810{
1811 int ret;
1812 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1813 root_objectid == BTRFS_TREE_LOG_OBJECTID);
1814
1815 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1816 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1817 parent, root_objectid, (int)owner,
1818 BTRFS_ADD_DELAYED_REF, NULL);
1819 } else {
1820 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1821 parent, root_objectid, owner, offset,
1822 BTRFS_ADD_DELAYED_REF, NULL);
1823 }
1824 return ret;
1825}
1826
1827static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1828 struct btrfs_root *root,
1829 u64 bytenr, u64 num_bytes,
1830 u64 parent, u64 root_objectid,
1831 u64 owner, u64 offset, int refs_to_add,
1832 struct btrfs_delayed_extent_op *extent_op)
1833{
1834 struct btrfs_path *path;
1835 struct extent_buffer *leaf;
1836 struct btrfs_extent_item *item;
1837 u64 refs;
1838 int ret;
1839 int err = 0;
1840
1841 path = btrfs_alloc_path();
1842 if (!path)
1843 return -ENOMEM;
1844
1845 path->reada = 1;
1846 path->leave_spinning = 1;
1847 /* this will setup the path even if it fails to insert the back ref */
1848 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1849 path, bytenr, num_bytes, parent,
1850 root_objectid, owner, offset,
1851 refs_to_add, extent_op);
1852 if (ret == 0)
1853 goto out;
1854
1855 if (ret != -EAGAIN) {
1856 err = ret;
1857 goto out;
1858 }
1859
1860 leaf = path->nodes[0];
1861 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1862 refs = btrfs_extent_refs(leaf, item);
1863 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1864 if (extent_op)
1865 __run_delayed_extent_op(extent_op, leaf, item);
56bec294 1866
5d4f98a2 1867 btrfs_mark_buffer_dirty(leaf);
56bec294
CM
1868 btrfs_release_path(root->fs_info->extent_root, path);
1869
1870 path->reada = 1;
b9473439
CM
1871 path->leave_spinning = 1;
1872
56bec294
CM
1873 /* now insert the actual backref */
1874 ret = insert_extent_backref(trans, root->fs_info->extent_root,
5d4f98a2
YZ
1875 path, bytenr, parent, root_objectid,
1876 owner, offset, refs_to_add);
56bec294 1877 BUG_ON(ret);
5d4f98a2 1878out:
56bec294 1879 btrfs_free_path(path);
5d4f98a2 1880 return err;
56bec294
CM
1881}
1882
5d4f98a2
YZ
1883static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1884 struct btrfs_root *root,
1885 struct btrfs_delayed_ref_node *node,
1886 struct btrfs_delayed_extent_op *extent_op,
1887 int insert_reserved)
56bec294 1888{
5d4f98a2
YZ
1889 int ret = 0;
1890 struct btrfs_delayed_data_ref *ref;
1891 struct btrfs_key ins;
1892 u64 parent = 0;
1893 u64 ref_root = 0;
1894 u64 flags = 0;
1895
1896 ins.objectid = node->bytenr;
1897 ins.offset = node->num_bytes;
1898 ins.type = BTRFS_EXTENT_ITEM_KEY;
1899
1900 ref = btrfs_delayed_node_to_data_ref(node);
1901 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1902 parent = ref->parent;
1903 else
1904 ref_root = ref->root;
1905
1906 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1907 if (extent_op) {
1908 BUG_ON(extent_op->update_key);
1909 flags |= extent_op->flags_to_set;
1910 }
1911 ret = alloc_reserved_file_extent(trans, root,
1912 parent, ref_root, flags,
1913 ref->objectid, ref->offset,
1914 &ins, node->ref_mod);
5d4f98a2
YZ
1915 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1916 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1917 node->num_bytes, parent,
1918 ref_root, ref->objectid,
1919 ref->offset, node->ref_mod,
1920 extent_op);
1921 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1922 ret = __btrfs_free_extent(trans, root, node->bytenr,
1923 node->num_bytes, parent,
1924 ref_root, ref->objectid,
1925 ref->offset, node->ref_mod,
1926 extent_op);
1927 } else {
1928 BUG();
1929 }
1930 return ret;
1931}
1932
1933static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1934 struct extent_buffer *leaf,
1935 struct btrfs_extent_item *ei)
1936{
1937 u64 flags = btrfs_extent_flags(leaf, ei);
1938 if (extent_op->update_flags) {
1939 flags |= extent_op->flags_to_set;
1940 btrfs_set_extent_flags(leaf, ei, flags);
1941 }
1942
1943 if (extent_op->update_key) {
1944 struct btrfs_tree_block_info *bi;
1945 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1946 bi = (struct btrfs_tree_block_info *)(ei + 1);
1947 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1948 }
1949}
1950
1951static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1952 struct btrfs_root *root,
1953 struct btrfs_delayed_ref_node *node,
1954 struct btrfs_delayed_extent_op *extent_op)
1955{
1956 struct btrfs_key key;
1957 struct btrfs_path *path;
1958 struct btrfs_extent_item *ei;
1959 struct extent_buffer *leaf;
1960 u32 item_size;
56bec294 1961 int ret;
5d4f98a2
YZ
1962 int err = 0;
1963
1964 path = btrfs_alloc_path();
1965 if (!path)
1966 return -ENOMEM;
1967
1968 key.objectid = node->bytenr;
1969 key.type = BTRFS_EXTENT_ITEM_KEY;
1970 key.offset = node->num_bytes;
1971
1972 path->reada = 1;
1973 path->leave_spinning = 1;
1974 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1975 path, 0, 1);
1976 if (ret < 0) {
1977 err = ret;
1978 goto out;
1979 }
1980 if (ret > 0) {
1981 err = -EIO;
1982 goto out;
1983 }
1984
1985 leaf = path->nodes[0];
1986 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1987#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1988 if (item_size < sizeof(*ei)) {
1989 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1990 path, (u64)-1, 0);
1991 if (ret < 0) {
1992 err = ret;
1993 goto out;
1994 }
1995 leaf = path->nodes[0];
1996 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1997 }
1998#endif
1999 BUG_ON(item_size < sizeof(*ei));
2000 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2001 __run_delayed_extent_op(extent_op, leaf, ei);
56bec294 2002
5d4f98a2
YZ
2003 btrfs_mark_buffer_dirty(leaf);
2004out:
2005 btrfs_free_path(path);
2006 return err;
56bec294
CM
2007}
2008
5d4f98a2
YZ
2009static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2010 struct btrfs_root *root,
2011 struct btrfs_delayed_ref_node *node,
2012 struct btrfs_delayed_extent_op *extent_op,
2013 int insert_reserved)
56bec294
CM
2014{
2015 int ret = 0;
5d4f98a2
YZ
2016 struct btrfs_delayed_tree_ref *ref;
2017 struct btrfs_key ins;
2018 u64 parent = 0;
2019 u64 ref_root = 0;
56bec294 2020
5d4f98a2
YZ
2021 ins.objectid = node->bytenr;
2022 ins.offset = node->num_bytes;
2023 ins.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 2024
5d4f98a2
YZ
2025 ref = btrfs_delayed_node_to_tree_ref(node);
2026 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2027 parent = ref->parent;
2028 else
2029 ref_root = ref->root;
2030
2031 BUG_ON(node->ref_mod != 1);
2032 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2033 BUG_ON(!extent_op || !extent_op->update_flags ||
2034 !extent_op->update_key);
2035 ret = alloc_reserved_tree_block(trans, root,
2036 parent, ref_root,
2037 extent_op->flags_to_set,
2038 &extent_op->key,
2039 ref->level, &ins);
5d4f98a2
YZ
2040 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2041 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2042 node->num_bytes, parent, ref_root,
2043 ref->level, 0, 1, extent_op);
2044 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2045 ret = __btrfs_free_extent(trans, root, node->bytenr,
2046 node->num_bytes, parent, ref_root,
2047 ref->level, 0, 1, extent_op);
2048 } else {
2049 BUG();
2050 }
56bec294
CM
2051 return ret;
2052}
2053
2054/* helper function to actually process a single delayed ref entry */
5d4f98a2
YZ
2055static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2056 struct btrfs_root *root,
2057 struct btrfs_delayed_ref_node *node,
2058 struct btrfs_delayed_extent_op *extent_op,
2059 int insert_reserved)
56bec294
CM
2060{
2061 int ret;
5d4f98a2 2062 if (btrfs_delayed_ref_is_head(node)) {
56bec294
CM
2063 struct btrfs_delayed_ref_head *head;
2064 /*
2065 * we've hit the end of the chain and we were supposed
2066 * to insert this extent into the tree. But, it got
2067 * deleted before we ever needed to insert it, so all
2068 * we have to do is clean up the accounting
2069 */
5d4f98a2
YZ
2070 BUG_ON(extent_op);
2071 head = btrfs_delayed_node_to_head(node);
56bec294 2072 if (insert_reserved) {
f0486c68
YZ
2073 btrfs_pin_extent(root, node->bytenr,
2074 node->num_bytes, 1);
5d4f98a2
YZ
2075 if (head->is_data) {
2076 ret = btrfs_del_csums(trans, root,
2077 node->bytenr,
2078 node->num_bytes);
2079 BUG_ON(ret);
2080 }
56bec294 2081 }
56bec294
CM
2082 mutex_unlock(&head->mutex);
2083 return 0;
2084 }
2085
5d4f98a2
YZ
2086 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2087 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2088 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2089 insert_reserved);
2090 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2091 node->type == BTRFS_SHARED_DATA_REF_KEY)
2092 ret = run_delayed_data_ref(trans, root, node, extent_op,
2093 insert_reserved);
2094 else
2095 BUG();
2096 return ret;
56bec294
CM
2097}
2098
2099static noinline struct btrfs_delayed_ref_node *
2100select_delayed_ref(struct btrfs_delayed_ref_head *head)
2101{
2102 struct rb_node *node;
2103 struct btrfs_delayed_ref_node *ref;
2104 int action = BTRFS_ADD_DELAYED_REF;
2105again:
2106 /*
2107 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2108 * this prevents ref count from going down to zero when
2109 * there still are pending delayed ref.
2110 */
2111 node = rb_prev(&head->node.rb_node);
2112 while (1) {
2113 if (!node)
2114 break;
2115 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2116 rb_node);
2117 if (ref->bytenr != head->node.bytenr)
2118 break;
5d4f98a2 2119 if (ref->action == action)
56bec294
CM
2120 return ref;
2121 node = rb_prev(node);
2122 }
2123 if (action == BTRFS_ADD_DELAYED_REF) {
2124 action = BTRFS_DROP_DELAYED_REF;
2125 goto again;
2126 }
2127 return NULL;
2128}
2129
c3e69d58
CM
2130static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2131 struct btrfs_root *root,
2132 struct list_head *cluster)
56bec294 2133{
56bec294
CM
2134 struct btrfs_delayed_ref_root *delayed_refs;
2135 struct btrfs_delayed_ref_node *ref;
2136 struct btrfs_delayed_ref_head *locked_ref = NULL;
5d4f98a2 2137 struct btrfs_delayed_extent_op *extent_op;
56bec294 2138 int ret;
c3e69d58 2139 int count = 0;
56bec294 2140 int must_insert_reserved = 0;
56bec294
CM
2141
2142 delayed_refs = &trans->transaction->delayed_refs;
56bec294
CM
2143 while (1) {
2144 if (!locked_ref) {
c3e69d58
CM
2145 /* pick a new head ref from the cluster list */
2146 if (list_empty(cluster))
56bec294 2147 break;
56bec294 2148
c3e69d58
CM
2149 locked_ref = list_entry(cluster->next,
2150 struct btrfs_delayed_ref_head, cluster);
2151
2152 /* grab the lock that says we are going to process
2153 * all the refs for this head */
2154 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2155
2156 /*
2157 * we may have dropped the spin lock to get the head
2158 * mutex lock, and that might have given someone else
2159 * time to free the head. If that's true, it has been
2160 * removed from our list and we can move on.
2161 */
2162 if (ret == -EAGAIN) {
2163 locked_ref = NULL;
2164 count++;
2165 continue;
56bec294
CM
2166 }
2167 }
a28ec197 2168
56bec294
CM
2169 /*
2170 * record the must insert reserved flag before we
2171 * drop the spin lock.
2172 */
2173 must_insert_reserved = locked_ref->must_insert_reserved;
2174 locked_ref->must_insert_reserved = 0;
7bb86316 2175
5d4f98a2
YZ
2176 extent_op = locked_ref->extent_op;
2177 locked_ref->extent_op = NULL;
2178
56bec294
CM
2179 /*
2180 * locked_ref is the head node, so we have to go one
2181 * node back for any delayed ref updates
2182 */
56bec294
CM
2183 ref = select_delayed_ref(locked_ref);
2184 if (!ref) {
2185 /* All delayed refs have been processed, Go ahead
2186 * and send the head node to run_one_delayed_ref,
2187 * so that any accounting fixes can happen
2188 */
2189 ref = &locked_ref->node;
5d4f98a2
YZ
2190
2191 if (extent_op && must_insert_reserved) {
2192 kfree(extent_op);
2193 extent_op = NULL;
2194 }
2195
2196 if (extent_op) {
2197 spin_unlock(&delayed_refs->lock);
2198
2199 ret = run_delayed_extent_op(trans, root,
2200 ref, extent_op);
2201 BUG_ON(ret);
2202 kfree(extent_op);
2203
2204 cond_resched();
2205 spin_lock(&delayed_refs->lock);
2206 continue;
2207 }
2208
c3e69d58 2209 list_del_init(&locked_ref->cluster);
56bec294
CM
2210 locked_ref = NULL;
2211 }
02217ed2 2212
56bec294
CM
2213 ref->in_tree = 0;
2214 rb_erase(&ref->rb_node, &delayed_refs->root);
2215 delayed_refs->num_entries--;
5d4f98a2 2216
56bec294 2217 spin_unlock(&delayed_refs->lock);
925baedd 2218
5d4f98a2 2219 ret = run_one_delayed_ref(trans, root, ref, extent_op,
56bec294
CM
2220 must_insert_reserved);
2221 BUG_ON(ret);
eb099670 2222
5d4f98a2
YZ
2223 btrfs_put_delayed_ref(ref);
2224 kfree(extent_op);
c3e69d58 2225 count++;
5d4f98a2 2226
c3e69d58
CM
2227 cond_resched();
2228 spin_lock(&delayed_refs->lock);
2229 }
2230 return count;
2231}
2232
2233/*
2234 * this starts processing the delayed reference count updates and
2235 * extent insertions we have queued up so far. count can be
2236 * 0, which means to process everything in the tree at the start
2237 * of the run (but not newly added entries), or it can be some target
2238 * number you'd like to process.
2239 */
2240int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2241 struct btrfs_root *root, unsigned long count)
2242{
2243 struct rb_node *node;
2244 struct btrfs_delayed_ref_root *delayed_refs;
2245 struct btrfs_delayed_ref_node *ref;
2246 struct list_head cluster;
2247 int ret;
2248 int run_all = count == (unsigned long)-1;
2249 int run_most = 0;
2250
2251 if (root == root->fs_info->extent_root)
2252 root = root->fs_info->tree_root;
2253
2254 delayed_refs = &trans->transaction->delayed_refs;
2255 INIT_LIST_HEAD(&cluster);
2256again:
2257 spin_lock(&delayed_refs->lock);
2258 if (count == 0) {
2259 count = delayed_refs->num_entries * 2;
2260 run_most = 1;
2261 }
2262 while (1) {
2263 if (!(run_all || run_most) &&
2264 delayed_refs->num_heads_ready < 64)
2265 break;
eb099670 2266
56bec294 2267 /*
c3e69d58
CM
2268 * go find something we can process in the rbtree. We start at
2269 * the beginning of the tree, and then build a cluster
2270 * of refs to process starting at the first one we are able to
2271 * lock
56bec294 2272 */
c3e69d58
CM
2273 ret = btrfs_find_ref_cluster(trans, &cluster,
2274 delayed_refs->run_delayed_start);
2275 if (ret)
56bec294
CM
2276 break;
2277
c3e69d58
CM
2278 ret = run_clustered_refs(trans, root, &cluster);
2279 BUG_ON(ret < 0);
2280
2281 count -= min_t(unsigned long, ret, count);
2282
2283 if (count == 0)
2284 break;
eb099670 2285 }
c3e69d58 2286
56bec294 2287 if (run_all) {
56bec294 2288 node = rb_first(&delayed_refs->root);
c3e69d58 2289 if (!node)
56bec294 2290 goto out;
c3e69d58 2291 count = (unsigned long)-1;
e9d0b13b 2292
56bec294
CM
2293 while (node) {
2294 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2295 rb_node);
2296 if (btrfs_delayed_ref_is_head(ref)) {
2297 struct btrfs_delayed_ref_head *head;
5caf2a00 2298
56bec294
CM
2299 head = btrfs_delayed_node_to_head(ref);
2300 atomic_inc(&ref->refs);
2301
2302 spin_unlock(&delayed_refs->lock);
2303 mutex_lock(&head->mutex);
2304 mutex_unlock(&head->mutex);
2305
2306 btrfs_put_delayed_ref(ref);
1887be66 2307 cond_resched();
56bec294
CM
2308 goto again;
2309 }
2310 node = rb_next(node);
2311 }
2312 spin_unlock(&delayed_refs->lock);
56bec294
CM
2313 schedule_timeout(1);
2314 goto again;
5f39d397 2315 }
54aa1f4d 2316out:
c3e69d58 2317 spin_unlock(&delayed_refs->lock);
a28ec197
CM
2318 return 0;
2319}
2320
5d4f98a2
YZ
2321int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2322 struct btrfs_root *root,
2323 u64 bytenr, u64 num_bytes, u64 flags,
2324 int is_data)
2325{
2326 struct btrfs_delayed_extent_op *extent_op;
2327 int ret;
2328
2329 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2330 if (!extent_op)
2331 return -ENOMEM;
2332
2333 extent_op->flags_to_set = flags;
2334 extent_op->update_flags = 1;
2335 extent_op->update_key = 0;
2336 extent_op->is_data = is_data ? 1 : 0;
2337
2338 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2339 if (ret)
2340 kfree(extent_op);
2341 return ret;
2342}
2343
2344static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2345 struct btrfs_root *root,
2346 struct btrfs_path *path,
2347 u64 objectid, u64 offset, u64 bytenr)
2348{
2349 struct btrfs_delayed_ref_head *head;
2350 struct btrfs_delayed_ref_node *ref;
2351 struct btrfs_delayed_data_ref *data_ref;
2352 struct btrfs_delayed_ref_root *delayed_refs;
2353 struct rb_node *node;
2354 int ret = 0;
2355
2356 ret = -ENOENT;
2357 delayed_refs = &trans->transaction->delayed_refs;
2358 spin_lock(&delayed_refs->lock);
2359 head = btrfs_find_delayed_ref_head(trans, bytenr);
2360 if (!head)
2361 goto out;
2362
2363 if (!mutex_trylock(&head->mutex)) {
2364 atomic_inc(&head->node.refs);
2365 spin_unlock(&delayed_refs->lock);
2366
2367 btrfs_release_path(root->fs_info->extent_root, path);
2368
2369 mutex_lock(&head->mutex);
2370 mutex_unlock(&head->mutex);
2371 btrfs_put_delayed_ref(&head->node);
2372 return -EAGAIN;
2373 }
2374
2375 node = rb_prev(&head->node.rb_node);
2376 if (!node)
2377 goto out_unlock;
2378
2379 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2380
2381 if (ref->bytenr != bytenr)
2382 goto out_unlock;
2383
2384 ret = 1;
2385 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2386 goto out_unlock;
2387
2388 data_ref = btrfs_delayed_node_to_data_ref(ref);
2389
2390 node = rb_prev(node);
2391 if (node) {
2392 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2393 if (ref->bytenr == bytenr)
2394 goto out_unlock;
2395 }
2396
2397 if (data_ref->root != root->root_key.objectid ||
2398 data_ref->objectid != objectid || data_ref->offset != offset)
2399 goto out_unlock;
2400
2401 ret = 0;
2402out_unlock:
2403 mutex_unlock(&head->mutex);
2404out:
2405 spin_unlock(&delayed_refs->lock);
2406 return ret;
2407}
2408
2409static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2410 struct btrfs_root *root,
2411 struct btrfs_path *path,
2412 u64 objectid, u64 offset, u64 bytenr)
be20aa9d
CM
2413{
2414 struct btrfs_root *extent_root = root->fs_info->extent_root;
f321e491 2415 struct extent_buffer *leaf;
5d4f98a2
YZ
2416 struct btrfs_extent_data_ref *ref;
2417 struct btrfs_extent_inline_ref *iref;
2418 struct btrfs_extent_item *ei;
f321e491 2419 struct btrfs_key key;
5d4f98a2 2420 u32 item_size;
be20aa9d 2421 int ret;
925baedd 2422
be20aa9d 2423 key.objectid = bytenr;
31840ae1 2424 key.offset = (u64)-1;
f321e491 2425 key.type = BTRFS_EXTENT_ITEM_KEY;
be20aa9d 2426
be20aa9d
CM
2427 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2428 if (ret < 0)
2429 goto out;
2430 BUG_ON(ret == 0);
80ff3856
YZ
2431
2432 ret = -ENOENT;
2433 if (path->slots[0] == 0)
31840ae1 2434 goto out;
be20aa9d 2435
31840ae1 2436 path->slots[0]--;
f321e491 2437 leaf = path->nodes[0];
5d4f98a2 2438 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
be20aa9d 2439
5d4f98a2 2440 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
be20aa9d 2441 goto out;
f321e491 2442
5d4f98a2
YZ
2443 ret = 1;
2444 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2445#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2446 if (item_size < sizeof(*ei)) {
2447 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2448 goto out;
2449 }
2450#endif
2451 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
bd09835d 2452
5d4f98a2
YZ
2453 if (item_size != sizeof(*ei) +
2454 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2455 goto out;
be20aa9d 2456
5d4f98a2
YZ
2457 if (btrfs_extent_generation(leaf, ei) <=
2458 btrfs_root_last_snapshot(&root->root_item))
2459 goto out;
2460
2461 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2462 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2463 BTRFS_EXTENT_DATA_REF_KEY)
2464 goto out;
2465
2466 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2467 if (btrfs_extent_refs(leaf, ei) !=
2468 btrfs_extent_data_ref_count(leaf, ref) ||
2469 btrfs_extent_data_ref_root(leaf, ref) !=
2470 root->root_key.objectid ||
2471 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2472 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2473 goto out;
2474
2475 ret = 0;
2476out:
2477 return ret;
2478}
2479
2480int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2481 struct btrfs_root *root,
2482 u64 objectid, u64 offset, u64 bytenr)
2483{
2484 struct btrfs_path *path;
2485 int ret;
2486 int ret2;
2487
2488 path = btrfs_alloc_path();
2489 if (!path)
2490 return -ENOENT;
2491
2492 do {
2493 ret = check_committed_ref(trans, root, path, objectid,
2494 offset, bytenr);
2495 if (ret && ret != -ENOENT)
f321e491 2496 goto out;
80ff3856 2497
5d4f98a2
YZ
2498 ret2 = check_delayed_ref(trans, root, path, objectid,
2499 offset, bytenr);
2500 } while (ret2 == -EAGAIN);
2501
2502 if (ret2 && ret2 != -ENOENT) {
2503 ret = ret2;
2504 goto out;
f321e491 2505 }
5d4f98a2
YZ
2506
2507 if (ret != -ENOENT || ret2 != -ENOENT)
2508 ret = 0;
be20aa9d 2509out:
80ff3856 2510 btrfs_free_path(path);
f0486c68
YZ
2511 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2512 WARN_ON(ret > 0);
f321e491 2513 return ret;
be20aa9d 2514}
c5739bba 2515
5d4f98a2 2516#if 0
31840ae1
ZY
2517int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2518 struct extent_buffer *buf, u32 nr_extents)
02217ed2 2519{
5f39d397 2520 struct btrfs_key key;
6407bf6d 2521 struct btrfs_file_extent_item *fi;
e4657689
ZY
2522 u64 root_gen;
2523 u32 nritems;
02217ed2 2524 int i;
db94535d 2525 int level;
31840ae1 2526 int ret = 0;
e4657689 2527 int shared = 0;
a28ec197 2528
3768f368 2529 if (!root->ref_cows)
a28ec197 2530 return 0;
5f39d397 2531
e4657689
ZY
2532 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2533 shared = 0;
2534 root_gen = root->root_key.offset;
2535 } else {
2536 shared = 1;
2537 root_gen = trans->transid - 1;
2538 }
2539
db94535d 2540 level = btrfs_header_level(buf);
5f39d397 2541 nritems = btrfs_header_nritems(buf);
4a096752 2542
31840ae1 2543 if (level == 0) {
31153d81
YZ
2544 struct btrfs_leaf_ref *ref;
2545 struct btrfs_extent_info *info;
2546
31840ae1 2547 ref = btrfs_alloc_leaf_ref(root, nr_extents);
31153d81 2548 if (!ref) {
31840ae1 2549 ret = -ENOMEM;
31153d81
YZ
2550 goto out;
2551 }
2552
e4657689 2553 ref->root_gen = root_gen;
31153d81
YZ
2554 ref->bytenr = buf->start;
2555 ref->owner = btrfs_header_owner(buf);
2556 ref->generation = btrfs_header_generation(buf);
31840ae1 2557 ref->nritems = nr_extents;
31153d81 2558 info = ref->extents;
bcc63abb 2559
31840ae1 2560 for (i = 0; nr_extents > 0 && i < nritems; i++) {
31153d81
YZ
2561 u64 disk_bytenr;
2562 btrfs_item_key_to_cpu(buf, &key, i);
2563 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2564 continue;
2565 fi = btrfs_item_ptr(buf, i,
2566 struct btrfs_file_extent_item);
2567 if (btrfs_file_extent_type(buf, fi) ==
2568 BTRFS_FILE_EXTENT_INLINE)
2569 continue;
2570 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2571 if (disk_bytenr == 0)
2572 continue;
2573
2574 info->bytenr = disk_bytenr;
2575 info->num_bytes =
2576 btrfs_file_extent_disk_num_bytes(buf, fi);
2577 info->objectid = key.objectid;
2578 info->offset = key.offset;
2579 info++;
2580 }
2581
e4657689 2582 ret = btrfs_add_leaf_ref(root, ref, shared);
5b84e8d6
YZ
2583 if (ret == -EEXIST && shared) {
2584 struct btrfs_leaf_ref *old;
2585 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2586 BUG_ON(!old);
2587 btrfs_remove_leaf_ref(root, old);
2588 btrfs_free_leaf_ref(root, old);
2589 ret = btrfs_add_leaf_ref(root, ref, shared);
2590 }
31153d81 2591 WARN_ON(ret);
bcc63abb 2592 btrfs_free_leaf_ref(root, ref);
31153d81
YZ
2593 }
2594out:
31840ae1
ZY
2595 return ret;
2596}
2597
b7a9f29f
CM
2598/* when a block goes through cow, we update the reference counts of
2599 * everything that block points to. The internal pointers of the block
2600 * can be in just about any order, and it is likely to have clusters of
2601 * things that are close together and clusters of things that are not.
2602 *
2603 * To help reduce the seeks that come with updating all of these reference
2604 * counts, sort them by byte number before actual updates are done.
2605 *
2606 * struct refsort is used to match byte number to slot in the btree block.
2607 * we sort based on the byte number and then use the slot to actually
2608 * find the item.
bd56b302
CM
2609 *
2610 * struct refsort is smaller than strcut btrfs_item and smaller than
2611 * struct btrfs_key_ptr. Since we're currently limited to the page size
2612 * for a btree block, there's no way for a kmalloc of refsorts for a
2613 * single node to be bigger than a page.
b7a9f29f
CM
2614 */
2615struct refsort {
2616 u64 bytenr;
2617 u32 slot;
2618};
2619
2620/*
2621 * for passing into sort()
2622 */
2623static int refsort_cmp(const void *a_void, const void *b_void)
2624{
2625 const struct refsort *a = a_void;
2626 const struct refsort *b = b_void;
2627
2628 if (a->bytenr < b->bytenr)
2629 return -1;
2630 if (a->bytenr > b->bytenr)
2631 return 1;
2632 return 0;
2633}
5d4f98a2 2634#endif
b7a9f29f 2635
5d4f98a2 2636static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
b7a9f29f 2637 struct btrfs_root *root,
5d4f98a2
YZ
2638 struct extent_buffer *buf,
2639 int full_backref, int inc)
31840ae1
ZY
2640{
2641 u64 bytenr;
5d4f98a2
YZ
2642 u64 num_bytes;
2643 u64 parent;
31840ae1 2644 u64 ref_root;
31840ae1 2645 u32 nritems;
31840ae1
ZY
2646 struct btrfs_key key;
2647 struct btrfs_file_extent_item *fi;
2648 int i;
2649 int level;
2650 int ret = 0;
31840ae1 2651 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
5d4f98a2 2652 u64, u64, u64, u64, u64, u64);
31840ae1
ZY
2653
2654 ref_root = btrfs_header_owner(buf);
31840ae1
ZY
2655 nritems = btrfs_header_nritems(buf);
2656 level = btrfs_header_level(buf);
2657
5d4f98a2
YZ
2658 if (!root->ref_cows && level == 0)
2659 return 0;
31840ae1 2660
5d4f98a2
YZ
2661 if (inc)
2662 process_func = btrfs_inc_extent_ref;
2663 else
2664 process_func = btrfs_free_extent;
31840ae1 2665
5d4f98a2
YZ
2666 if (full_backref)
2667 parent = buf->start;
2668 else
2669 parent = 0;
2670
2671 for (i = 0; i < nritems; i++) {
31840ae1 2672 if (level == 0) {
5d4f98a2 2673 btrfs_item_key_to_cpu(buf, &key, i);
31840ae1
ZY
2674 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2675 continue;
5d4f98a2 2676 fi = btrfs_item_ptr(buf, i,
31840ae1
ZY
2677 struct btrfs_file_extent_item);
2678 if (btrfs_file_extent_type(buf, fi) ==
2679 BTRFS_FILE_EXTENT_INLINE)
2680 continue;
2681 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2682 if (bytenr == 0)
2683 continue;
5d4f98a2
YZ
2684
2685 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2686 key.offset -= btrfs_file_extent_offset(buf, fi);
2687 ret = process_func(trans, root, bytenr, num_bytes,
2688 parent, ref_root, key.objectid,
2689 key.offset);
31840ae1
ZY
2690 if (ret)
2691 goto fail;
2692 } else {
5d4f98a2
YZ
2693 bytenr = btrfs_node_blockptr(buf, i);
2694 num_bytes = btrfs_level_size(root, level - 1);
2695 ret = process_func(trans, root, bytenr, num_bytes,
2696 parent, ref_root, level - 1, 0);
31840ae1
ZY
2697 if (ret)
2698 goto fail;
2699 }
2700 }
2701 return 0;
2702fail:
5d4f98a2
YZ
2703 BUG();
2704 return ret;
2705}
2706
2707int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2708 struct extent_buffer *buf, int full_backref)
2709{
2710 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2711}
2712
2713int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2714 struct extent_buffer *buf, int full_backref)
2715{
2716 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
31840ae1
ZY
2717}
2718
9078a3e1
CM
2719static int write_one_cache_group(struct btrfs_trans_handle *trans,
2720 struct btrfs_root *root,
2721 struct btrfs_path *path,
2722 struct btrfs_block_group_cache *cache)
2723{
2724 int ret;
9078a3e1 2725 struct btrfs_root *extent_root = root->fs_info->extent_root;
5f39d397
CM
2726 unsigned long bi;
2727 struct extent_buffer *leaf;
9078a3e1 2728
9078a3e1 2729 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
54aa1f4d
CM
2730 if (ret < 0)
2731 goto fail;
9078a3e1 2732 BUG_ON(ret);
5f39d397
CM
2733
2734 leaf = path->nodes[0];
2735 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2736 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2737 btrfs_mark_buffer_dirty(leaf);
9078a3e1 2738 btrfs_release_path(extent_root, path);
54aa1f4d 2739fail:
9078a3e1
CM
2740 if (ret)
2741 return ret;
9078a3e1
CM
2742 return 0;
2743
2744}
2745
4a8c9a62
YZ
2746static struct btrfs_block_group_cache *
2747next_block_group(struct btrfs_root *root,
2748 struct btrfs_block_group_cache *cache)
2749{
2750 struct rb_node *node;
2751 spin_lock(&root->fs_info->block_group_cache_lock);
2752 node = rb_next(&cache->cache_node);
2753 btrfs_put_block_group(cache);
2754 if (node) {
2755 cache = rb_entry(node, struct btrfs_block_group_cache,
2756 cache_node);
11dfe35a 2757 btrfs_get_block_group(cache);
4a8c9a62
YZ
2758 } else
2759 cache = NULL;
2760 spin_unlock(&root->fs_info->block_group_cache_lock);
2761 return cache;
2762}
2763
0af3d00b
JB
2764static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2765 struct btrfs_trans_handle *trans,
2766 struct btrfs_path *path)
2767{
2768 struct btrfs_root *root = block_group->fs_info->tree_root;
2769 struct inode *inode = NULL;
2770 u64 alloc_hint = 0;
2b20982e 2771 int dcs = BTRFS_DC_ERROR;
0af3d00b
JB
2772 int num_pages = 0;
2773 int retries = 0;
2774 int ret = 0;
2775
2776 /*
2777 * If this block group is smaller than 100 megs don't bother caching the
2778 * block group.
2779 */
2780 if (block_group->key.offset < (100 * 1024 * 1024)) {
2781 spin_lock(&block_group->lock);
2782 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2783 spin_unlock(&block_group->lock);
2784 return 0;
2785 }
2786
2787again:
2788 inode = lookup_free_space_inode(root, block_group, path);
2789 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2790 ret = PTR_ERR(inode);
2791 btrfs_release_path(root, path);
2792 goto out;
2793 }
2794
2795 if (IS_ERR(inode)) {
2796 BUG_ON(retries);
2797 retries++;
2798
2799 if (block_group->ro)
2800 goto out_free;
2801
2802 ret = create_free_space_inode(root, trans, block_group, path);
2803 if (ret)
2804 goto out_free;
2805 goto again;
2806 }
2807
2808 /*
2809 * We want to set the generation to 0, that way if anything goes wrong
2810 * from here on out we know not to trust this cache when we load up next
2811 * time.
2812 */
2813 BTRFS_I(inode)->generation = 0;
2814 ret = btrfs_update_inode(trans, root, inode);
2815 WARN_ON(ret);
2816
2817 if (i_size_read(inode) > 0) {
2818 ret = btrfs_truncate_free_space_cache(root, trans, path,
2819 inode);
2820 if (ret)
2821 goto out_put;
2822 }
2823
2824 spin_lock(&block_group->lock);
2825 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2b20982e
JB
2826 /* We're not cached, don't bother trying to write stuff out */
2827 dcs = BTRFS_DC_WRITTEN;
0af3d00b
JB
2828 spin_unlock(&block_group->lock);
2829 goto out_put;
2830 }
2831 spin_unlock(&block_group->lock);
2832
2833 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2834 if (!num_pages)
2835 num_pages = 1;
2836
2837 /*
2838 * Just to make absolutely sure we have enough space, we're going to
2839 * preallocate 12 pages worth of space for each block group. In
2840 * practice we ought to use at most 8, but we need extra space so we can
2841 * add our header and have a terminator between the extents and the
2842 * bitmaps.
2843 */
2844 num_pages *= 16;
2845 num_pages *= PAGE_CACHE_SIZE;
2846
2847 ret = btrfs_check_data_free_space(inode, num_pages);
2848 if (ret)
2849 goto out_put;
2850
2851 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2852 num_pages, num_pages,
2853 &alloc_hint);
2b20982e
JB
2854 if (!ret)
2855 dcs = BTRFS_DC_SETUP;
0af3d00b
JB
2856 btrfs_free_reserved_data_space(inode, num_pages);
2857out_put:
2858 iput(inode);
2859out_free:
2860 btrfs_release_path(root, path);
2861out:
2862 spin_lock(&block_group->lock);
2b20982e 2863 block_group->disk_cache_state = dcs;
0af3d00b
JB
2864 spin_unlock(&block_group->lock);
2865
2866 return ret;
2867}
2868
96b5179d
CM
2869int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2870 struct btrfs_root *root)
9078a3e1 2871{
4a8c9a62 2872 struct btrfs_block_group_cache *cache;
9078a3e1 2873 int err = 0;
9078a3e1 2874 struct btrfs_path *path;
96b5179d 2875 u64 last = 0;
9078a3e1
CM
2876
2877 path = btrfs_alloc_path();
2878 if (!path)
2879 return -ENOMEM;
2880
0af3d00b
JB
2881again:
2882 while (1) {
2883 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2884 while (cache) {
2885 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2886 break;
2887 cache = next_block_group(root, cache);
2888 }
2889 if (!cache) {
2890 if (last == 0)
2891 break;
2892 last = 0;
2893 continue;
2894 }
2895 err = cache_save_setup(cache, trans, path);
2896 last = cache->key.objectid + cache->key.offset;
2897 btrfs_put_block_group(cache);
2898 }
2899
d397712b 2900 while (1) {
4a8c9a62
YZ
2901 if (last == 0) {
2902 err = btrfs_run_delayed_refs(trans, root,
2903 (unsigned long)-1);
2904 BUG_ON(err);
0f9dd46c 2905 }
54aa1f4d 2906
4a8c9a62
YZ
2907 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2908 while (cache) {
0af3d00b
JB
2909 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2910 btrfs_put_block_group(cache);
2911 goto again;
2912 }
2913
4a8c9a62
YZ
2914 if (cache->dirty)
2915 break;
2916 cache = next_block_group(root, cache);
2917 }
2918 if (!cache) {
2919 if (last == 0)
2920 break;
2921 last = 0;
2922 continue;
2923 }
0f9dd46c 2924
0cb59c99
JB
2925 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2926 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
e8569813 2927 cache->dirty = 0;
4a8c9a62 2928 last = cache->key.objectid + cache->key.offset;
0f9dd46c 2929
4a8c9a62
YZ
2930 err = write_one_cache_group(trans, root, path, cache);
2931 BUG_ON(err);
2932 btrfs_put_block_group(cache);
9078a3e1 2933 }
4a8c9a62 2934
0cb59c99
JB
2935 while (1) {
2936 /*
2937 * I don't think this is needed since we're just marking our
2938 * preallocated extent as written, but just in case it can't
2939 * hurt.
2940 */
2941 if (last == 0) {
2942 err = btrfs_run_delayed_refs(trans, root,
2943 (unsigned long)-1);
2944 BUG_ON(err);
2945 }
2946
2947 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2948 while (cache) {
2949 /*
2950 * Really this shouldn't happen, but it could if we
2951 * couldn't write the entire preallocated extent and
2952 * splitting the extent resulted in a new block.
2953 */
2954 if (cache->dirty) {
2955 btrfs_put_block_group(cache);
2956 goto again;
2957 }
2958 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2959 break;
2960 cache = next_block_group(root, cache);
2961 }
2962 if (!cache) {
2963 if (last == 0)
2964 break;
2965 last = 0;
2966 continue;
2967 }
2968
2969 btrfs_write_out_cache(root, trans, cache, path);
2970
2971 /*
2972 * If we didn't have an error then the cache state is still
2973 * NEED_WRITE, so we can set it to WRITTEN.
2974 */
2975 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2976 cache->disk_cache_state = BTRFS_DC_WRITTEN;
2977 last = cache->key.objectid + cache->key.offset;
2978 btrfs_put_block_group(cache);
2979 }
2980
9078a3e1 2981 btrfs_free_path(path);
4a8c9a62 2982 return 0;
9078a3e1
CM
2983}
2984
d2fb3437
YZ
2985int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2986{
2987 struct btrfs_block_group_cache *block_group;
2988 int readonly = 0;
2989
2990 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2991 if (!block_group || block_group->ro)
2992 readonly = 1;
2993 if (block_group)
fa9c0d79 2994 btrfs_put_block_group(block_group);
d2fb3437
YZ
2995 return readonly;
2996}
2997
593060d7
CM
2998static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2999 u64 total_bytes, u64 bytes_used,
3000 struct btrfs_space_info **space_info)
3001{
3002 struct btrfs_space_info *found;
b742bb82
YZ
3003 int i;
3004 int factor;
3005
3006 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3007 BTRFS_BLOCK_GROUP_RAID10))
3008 factor = 2;
3009 else
3010 factor = 1;
593060d7
CM
3011
3012 found = __find_space_info(info, flags);
3013 if (found) {
25179201 3014 spin_lock(&found->lock);
593060d7 3015 found->total_bytes += total_bytes;
89a55897 3016 found->disk_total += total_bytes * factor;
593060d7 3017 found->bytes_used += bytes_used;
b742bb82 3018 found->disk_used += bytes_used * factor;
8f18cf13 3019 found->full = 0;
25179201 3020 spin_unlock(&found->lock);
593060d7
CM
3021 *space_info = found;
3022 return 0;
3023 }
c146afad 3024 found = kzalloc(sizeof(*found), GFP_NOFS);
593060d7
CM
3025 if (!found)
3026 return -ENOMEM;
3027
b742bb82
YZ
3028 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3029 INIT_LIST_HEAD(&found->block_groups[i]);
80eb234a 3030 init_rwsem(&found->groups_sem);
0f9dd46c 3031 spin_lock_init(&found->lock);
b742bb82
YZ
3032 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
3033 BTRFS_BLOCK_GROUP_SYSTEM |
3034 BTRFS_BLOCK_GROUP_METADATA);
593060d7 3035 found->total_bytes = total_bytes;
89a55897 3036 found->disk_total = total_bytes * factor;
593060d7 3037 found->bytes_used = bytes_used;
b742bb82 3038 found->disk_used = bytes_used * factor;
593060d7 3039 found->bytes_pinned = 0;
e8569813 3040 found->bytes_reserved = 0;
c146afad 3041 found->bytes_readonly = 0;
f0486c68 3042 found->bytes_may_use = 0;
593060d7 3043 found->full = 0;
0e4f8f88 3044 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
6d74119f 3045 found->chunk_alloc = 0;
593060d7 3046 *space_info = found;
4184ea7f 3047 list_add_rcu(&found->list, &info->space_info);
817d52f8 3048 atomic_set(&found->caching_threads, 0);
593060d7
CM
3049 return 0;
3050}
3051
8790d502
CM
3052static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3053{
3054 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
611f0e00 3055 BTRFS_BLOCK_GROUP_RAID1 |
321aecc6 3056 BTRFS_BLOCK_GROUP_RAID10 |
611f0e00 3057 BTRFS_BLOCK_GROUP_DUP);
8790d502
CM
3058 if (extra_flags) {
3059 if (flags & BTRFS_BLOCK_GROUP_DATA)
3060 fs_info->avail_data_alloc_bits |= extra_flags;
3061 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3062 fs_info->avail_metadata_alloc_bits |= extra_flags;
3063 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3064 fs_info->avail_system_alloc_bits |= extra_flags;
3065 }
3066}
593060d7 3067
2b82032c 3068u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
ec44a35c 3069{
cd02dca5
CM
3070 /*
3071 * we add in the count of missing devices because we want
3072 * to make sure that any RAID levels on a degraded FS
3073 * continue to be honored.
3074 */
3075 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3076 root->fs_info->fs_devices->missing_devices;
a061fc8d
CM
3077
3078 if (num_devices == 1)
3079 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3080 if (num_devices < 4)
3081 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3082
ec44a35c
CM
3083 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3084 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
a061fc8d 3085 BTRFS_BLOCK_GROUP_RAID10))) {
ec44a35c 3086 flags &= ~BTRFS_BLOCK_GROUP_DUP;
a061fc8d 3087 }
ec44a35c
CM
3088
3089 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
a061fc8d 3090 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
ec44a35c 3091 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
a061fc8d 3092 }
ec44a35c
CM
3093
3094 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3095 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3096 (flags & BTRFS_BLOCK_GROUP_RAID10) |
3097 (flags & BTRFS_BLOCK_GROUP_DUP)))
3098 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3099 return flags;
3100}
3101
b742bb82 3102static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
6a63209f 3103{
b742bb82
YZ
3104 if (flags & BTRFS_BLOCK_GROUP_DATA)
3105 flags |= root->fs_info->avail_data_alloc_bits &
3106 root->fs_info->data_alloc_profile;
3107 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3108 flags |= root->fs_info->avail_system_alloc_bits &
3109 root->fs_info->system_alloc_profile;
3110 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3111 flags |= root->fs_info->avail_metadata_alloc_bits &
3112 root->fs_info->metadata_alloc_profile;
3113 return btrfs_reduce_alloc_profile(root, flags);
6a63209f
JB
3114}
3115
6d07bcec 3116u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
9ed74f2d 3117{
b742bb82 3118 u64 flags;
9ed74f2d 3119
b742bb82
YZ
3120 if (data)
3121 flags = BTRFS_BLOCK_GROUP_DATA;
3122 else if (root == root->fs_info->chunk_root)
3123 flags = BTRFS_BLOCK_GROUP_SYSTEM;
9ed74f2d 3124 else
b742bb82 3125 flags = BTRFS_BLOCK_GROUP_METADATA;
9ed74f2d 3126
b742bb82 3127 return get_alloc_profile(root, flags);
6a63209f 3128}
9ed74f2d 3129
6a63209f
JB
3130void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3131{
6a63209f 3132 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
f0486c68 3133 BTRFS_BLOCK_GROUP_DATA);
9ed74f2d
JB
3134}
3135
6a63209f 3136/*
6a63209f
JB
3137 * This will check the space that the inode allocates from to make sure we have
3138 * enough space for bytes.
6a63209f 3139 */
0ca1f7ce 3140int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
6a63209f 3141{
6a63209f 3142 struct btrfs_space_info *data_sinfo;
0ca1f7ce 3143 struct btrfs_root *root = BTRFS_I(inode)->root;
ab6e2410 3144 u64 used;
0af3d00b 3145 int ret = 0, committed = 0, alloc_chunk = 1;
6a63209f 3146
6a63209f
JB
3147 /* make sure bytes are sectorsize aligned */
3148 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
6a63209f 3149
0af3d00b
JB
3150 if (root == root->fs_info->tree_root) {
3151 alloc_chunk = 0;
3152 committed = 1;
3153 }
3154
6a63209f 3155 data_sinfo = BTRFS_I(inode)->space_info;
33b4d47f
CM
3156 if (!data_sinfo)
3157 goto alloc;
9ed74f2d 3158
6a63209f
JB
3159again:
3160 /* make sure we have enough space to handle the data first */
3161 spin_lock(&data_sinfo->lock);
8929ecfa
YZ
3162 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3163 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3164 data_sinfo->bytes_may_use;
ab6e2410
JB
3165
3166 if (used + bytes > data_sinfo->total_bytes) {
4e06bdd6 3167 struct btrfs_trans_handle *trans;
9ed74f2d 3168
6a63209f
JB
3169 /*
3170 * if we don't have enough free bytes in this space then we need
3171 * to alloc a new chunk.
3172 */
0af3d00b 3173 if (!data_sinfo->full && alloc_chunk) {
6a63209f 3174 u64 alloc_target;
9ed74f2d 3175
0e4f8f88 3176 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
6a63209f 3177 spin_unlock(&data_sinfo->lock);
33b4d47f 3178alloc:
6a63209f 3179 alloc_target = btrfs_get_alloc_profile(root, 1);
7a7eaa40 3180 trans = btrfs_join_transaction(root);
a22285a6
YZ
3181 if (IS_ERR(trans))
3182 return PTR_ERR(trans);
9ed74f2d 3183
6a63209f
JB
3184 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3185 bytes + 2 * 1024 * 1024,
0e4f8f88
CM
3186 alloc_target,
3187 CHUNK_ALLOC_NO_FORCE);
6a63209f 3188 btrfs_end_transaction(trans, root);
d52a5b5f
MX
3189 if (ret < 0) {
3190 if (ret != -ENOSPC)
3191 return ret;
3192 else
3193 goto commit_trans;
3194 }
9ed74f2d 3195
33b4d47f
CM
3196 if (!data_sinfo) {
3197 btrfs_set_inode_space_info(root, inode);
3198 data_sinfo = BTRFS_I(inode)->space_info;
3199 }
6a63209f
JB
3200 goto again;
3201 }
3202 spin_unlock(&data_sinfo->lock);
6a63209f 3203
4e06bdd6 3204 /* commit the current transaction and try again */
d52a5b5f 3205commit_trans:
a4abeea4
JB
3206 if (!committed &&
3207 !atomic_read(&root->fs_info->open_ioctl_trans)) {
4e06bdd6 3208 committed = 1;
7a7eaa40 3209 trans = btrfs_join_transaction(root);
a22285a6
YZ
3210 if (IS_ERR(trans))
3211 return PTR_ERR(trans);
4e06bdd6
JB
3212 ret = btrfs_commit_transaction(trans, root);
3213 if (ret)
3214 return ret;
3215 goto again;
3216 }
9ed74f2d 3217
933b585f 3218#if 0 /* I hope we never need this code again, just in case */
8929ecfa
YZ
3219 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3220 "%llu bytes_reserved, " "%llu bytes_pinned, "
3221 "%llu bytes_readonly, %llu may use %llu total\n",
3222 (unsigned long long)bytes,
21380931
JB
3223 (unsigned long long)data_sinfo->bytes_used,
3224 (unsigned long long)data_sinfo->bytes_reserved,
3225 (unsigned long long)data_sinfo->bytes_pinned,
3226 (unsigned long long)data_sinfo->bytes_readonly,
3227 (unsigned long long)data_sinfo->bytes_may_use,
3228 (unsigned long long)data_sinfo->total_bytes);
933b585f 3229#endif
6a63209f
JB
3230 return -ENOSPC;
3231 }
3232 data_sinfo->bytes_may_use += bytes;
3233 BTRFS_I(inode)->reserved_bytes += bytes;
3234 spin_unlock(&data_sinfo->lock);
6a63209f 3235
9ed74f2d 3236 return 0;
9ed74f2d 3237}
6a63209f 3238
6a63209f 3239/*
0ca1f7ce
YZ
3240 * called when we are clearing an delalloc extent from the
3241 * inode's io_tree or there was an error for whatever reason
3242 * after calling btrfs_check_data_free_space
6a63209f 3243 */
0ca1f7ce 3244void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
e3ccfa98 3245{
0ca1f7ce 3246 struct btrfs_root *root = BTRFS_I(inode)->root;
6a63209f 3247 struct btrfs_space_info *data_sinfo;
e3ccfa98 3248
6a63209f
JB
3249 /* make sure bytes are sectorsize aligned */
3250 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
e3ccfa98 3251
6a63209f
JB
3252 data_sinfo = BTRFS_I(inode)->space_info;
3253 spin_lock(&data_sinfo->lock);
3254 data_sinfo->bytes_may_use -= bytes;
3255 BTRFS_I(inode)->reserved_bytes -= bytes;
3256 spin_unlock(&data_sinfo->lock);
e3ccfa98
JB
3257}
3258
97e728d4 3259static void force_metadata_allocation(struct btrfs_fs_info *info)
e3ccfa98 3260{
97e728d4
JB
3261 struct list_head *head = &info->space_info;
3262 struct btrfs_space_info *found;
e3ccfa98 3263
97e728d4
JB
3264 rcu_read_lock();
3265 list_for_each_entry_rcu(found, head, list) {
3266 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
0e4f8f88 3267 found->force_alloc = CHUNK_ALLOC_FORCE;
e3ccfa98 3268 }
97e728d4 3269 rcu_read_unlock();
e3ccfa98
JB
3270}
3271
e5bc2458 3272static int should_alloc_chunk(struct btrfs_root *root,
0e4f8f88
CM
3273 struct btrfs_space_info *sinfo, u64 alloc_bytes,
3274 int force)
32c00aff 3275{
424499db 3276 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
0e4f8f88 3277 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
e5bc2458 3278 u64 thresh;
e3ccfa98 3279
0e4f8f88
CM
3280 if (force == CHUNK_ALLOC_FORCE)
3281 return 1;
3282
3283 /*
3284 * in limited mode, we want to have some free space up to
3285 * about 1% of the FS size.
3286 */
3287 if (force == CHUNK_ALLOC_LIMITED) {
3288 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3289 thresh = max_t(u64, 64 * 1024 * 1024,
3290 div_factor_fine(thresh, 1));
3291
3292 if (num_bytes - num_allocated < thresh)
3293 return 1;
3294 }
3295
3296 /*
3297 * we have two similar checks here, one based on percentage
3298 * and once based on a hard number of 256MB. The idea
3299 * is that if we have a good amount of free
3300 * room, don't allocate a chunk. A good mount is
3301 * less than 80% utilized of the chunks we have allocated,
3302 * or more than 256MB free
3303 */
3304 if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
424499db 3305 return 0;
e3ccfa98 3306
0e4f8f88 3307 if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
424499db 3308 return 0;
32c00aff 3309
e5bc2458 3310 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
0e4f8f88
CM
3311
3312 /* 256MB or 5% of the FS */
e5bc2458
CM
3313 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3314
3315 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
14ed0ca6 3316 return 0;
424499db 3317 return 1;
32c00aff
JB
3318}
3319
6324fbf3
CM
3320static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3321 struct btrfs_root *extent_root, u64 alloc_bytes,
0ef3e66b 3322 u64 flags, int force)
9ed74f2d 3323{
6324fbf3 3324 struct btrfs_space_info *space_info;
97e728d4 3325 struct btrfs_fs_info *fs_info = extent_root->fs_info;
6d74119f 3326 int wait_for_alloc = 0;
9ed74f2d 3327 int ret = 0;
9ed74f2d 3328
2b82032c 3329 flags = btrfs_reduce_alloc_profile(extent_root, flags);
ec44a35c 3330
6324fbf3 3331 space_info = __find_space_info(extent_root->fs_info, flags);
593060d7
CM
3332 if (!space_info) {
3333 ret = update_space_info(extent_root->fs_info, flags,
3334 0, 0, &space_info);
3335 BUG_ON(ret);
9ed74f2d 3336 }
6324fbf3 3337 BUG_ON(!space_info);
9ed74f2d 3338
6d74119f 3339again:
25179201 3340 spin_lock(&space_info->lock);
9ed74f2d 3341 if (space_info->force_alloc)
0e4f8f88 3342 force = space_info->force_alloc;
25179201
JB
3343 if (space_info->full) {
3344 spin_unlock(&space_info->lock);
6d74119f 3345 return 0;
9ed74f2d
JB
3346 }
3347
0e4f8f88 3348 if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
25179201 3349 spin_unlock(&space_info->lock);
6d74119f
JB
3350 return 0;
3351 } else if (space_info->chunk_alloc) {
3352 wait_for_alloc = 1;
3353 } else {
3354 space_info->chunk_alloc = 1;
9ed74f2d 3355 }
0e4f8f88 3356
25179201 3357 spin_unlock(&space_info->lock);
9ed74f2d 3358
6d74119f
JB
3359 mutex_lock(&fs_info->chunk_mutex);
3360
3361 /*
3362 * The chunk_mutex is held throughout the entirety of a chunk
3363 * allocation, so once we've acquired the chunk_mutex we know that the
3364 * other guy is done and we need to recheck and see if we should
3365 * allocate.
3366 */
3367 if (wait_for_alloc) {
3368 mutex_unlock(&fs_info->chunk_mutex);
3369 wait_for_alloc = 0;
3370 goto again;
3371 }
3372
67377734
JB
3373 /*
3374 * If we have mixed data/metadata chunks we want to make sure we keep
3375 * allocating mixed chunks instead of individual chunks.
3376 */
3377 if (btrfs_mixed_space_info(space_info))
3378 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3379
97e728d4
JB
3380 /*
3381 * if we're doing a data chunk, go ahead and make sure that
3382 * we keep a reasonable number of metadata chunks allocated in the
3383 * FS as well.
3384 */
9ed74f2d 3385 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
97e728d4
JB
3386 fs_info->data_chunk_allocations++;
3387 if (!(fs_info->data_chunk_allocations %
3388 fs_info->metadata_ratio))
3389 force_metadata_allocation(fs_info);
9ed74f2d
JB
3390 }
3391
2b82032c 3392 ret = btrfs_alloc_chunk(trans, extent_root, flags);
9ed74f2d 3393 spin_lock(&space_info->lock);
9ed74f2d 3394 if (ret)
6324fbf3 3395 space_info->full = 1;
424499db
YZ
3396 else
3397 ret = 1;
6d74119f 3398
0e4f8f88 3399 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
6d74119f 3400 space_info->chunk_alloc = 0;
9ed74f2d 3401 spin_unlock(&space_info->lock);
c146afad 3402 mutex_unlock(&extent_root->fs_info->chunk_mutex);
0f9dd46c 3403 return ret;
6324fbf3 3404}
9ed74f2d 3405
9ed74f2d 3406/*
5da9d01b 3407 * shrink metadata reservation for delalloc
9ed74f2d 3408 */
5da9d01b 3409static int shrink_delalloc(struct btrfs_trans_handle *trans,
0019f10d 3410 struct btrfs_root *root, u64 to_reclaim, int sync)
5da9d01b 3411{
0ca1f7ce 3412 struct btrfs_block_rsv *block_rsv;
0019f10d 3413 struct btrfs_space_info *space_info;
5da9d01b
YZ
3414 u64 reserved;
3415 u64 max_reclaim;
3416 u64 reclaimed = 0;
b1953bce 3417 long time_left;
bf9022e0 3418 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
b1953bce 3419 int loops = 0;
36e39c40 3420 unsigned long progress;
5da9d01b 3421
0ca1f7ce 3422 block_rsv = &root->fs_info->delalloc_block_rsv;
0019f10d 3423 space_info = block_rsv->space_info;
bf9022e0
CM
3424
3425 smp_mb();
0019f10d 3426 reserved = space_info->bytes_reserved;
36e39c40 3427 progress = space_info->reservation_progress;
5da9d01b
YZ
3428
3429 if (reserved == 0)
3430 return 0;
3431
3432 max_reclaim = min(reserved, to_reclaim);
3433
b1953bce 3434 while (loops < 1024) {
bf9022e0
CM
3435 /* have the flusher threads jump in and do some IO */
3436 smp_mb();
3437 nr_pages = min_t(unsigned long, nr_pages,
3438 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3439 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
5da9d01b 3440
0019f10d 3441 spin_lock(&space_info->lock);
36e39c40 3442 if (reserved > space_info->bytes_reserved)
0019f10d
JB
3443 reclaimed += reserved - space_info->bytes_reserved;
3444 reserved = space_info->bytes_reserved;
3445 spin_unlock(&space_info->lock);
5da9d01b 3446
36e39c40
CM
3447 loops++;
3448
5da9d01b
YZ
3449 if (reserved == 0 || reclaimed >= max_reclaim)
3450 break;
3451
3452 if (trans && trans->transaction->blocked)
3453 return -EAGAIN;
bf9022e0 3454
36e39c40 3455 time_left = schedule_timeout_interruptible(1);
b1953bce
JB
3456
3457 /* We were interrupted, exit */
3458 if (time_left)
3459 break;
3460
36e39c40
CM
3461 /* we've kicked the IO a few times, if anything has been freed,
3462 * exit. There is no sense in looping here for a long time
3463 * when we really need to commit the transaction, or there are
3464 * just too many writers without enough free space
3465 */
3466
3467 if (loops > 3) {
3468 smp_mb();
3469 if (progress != space_info->reservation_progress)
3470 break;
3471 }
bf9022e0 3472
5da9d01b
YZ
3473 }
3474 return reclaimed >= to_reclaim;
3475}
3476
8bb8ab2e
JB
3477/*
3478 * Retries tells us how many times we've called reserve_metadata_bytes. The
3479 * idea is if this is the first call (retries == 0) then we will add to our
3480 * reserved count if we can't make the allocation in order to hold our place
3481 * while we go and try and free up space. That way for retries > 1 we don't try
3482 * and add space, we just check to see if the amount of unused space is >= the
3483 * total space, meaning that our reservation is valid.
3484 *
3485 * However if we don't intend to retry this reservation, pass -1 as retries so
3486 * that it short circuits this logic.
3487 */
3488static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
3489 struct btrfs_root *root,
3490 struct btrfs_block_rsv *block_rsv,
3491 u64 orig_bytes, int flush)
9ed74f2d 3492{
f0486c68 3493 struct btrfs_space_info *space_info = block_rsv->space_info;
8bb8ab2e
JB
3494 u64 unused;
3495 u64 num_bytes = orig_bytes;
3496 int retries = 0;
3497 int ret = 0;
3498 bool reserved = false;
38227933 3499 bool committed = false;
9ed74f2d 3500
8bb8ab2e
JB
3501again:
3502 ret = -ENOSPC;
3503 if (reserved)
3504 num_bytes = 0;
9ed74f2d 3505
8bb8ab2e
JB
3506 spin_lock(&space_info->lock);
3507 unused = space_info->bytes_used + space_info->bytes_reserved +
3508 space_info->bytes_pinned + space_info->bytes_readonly +
3509 space_info->bytes_may_use;
9ed74f2d 3510
8bb8ab2e
JB
3511 /*
3512 * The idea here is that we've not already over-reserved the block group
3513 * then we can go ahead and save our reservation first and then start
3514 * flushing if we need to. Otherwise if we've already overcommitted
3515 * lets start flushing stuff first and then come back and try to make
3516 * our reservation.
3517 */
3518 if (unused <= space_info->total_bytes) {
6f334348 3519 unused = space_info->total_bytes - unused;
8bb8ab2e
JB
3520 if (unused >= num_bytes) {
3521 if (!reserved)
3522 space_info->bytes_reserved += orig_bytes;
3523 ret = 0;
3524 } else {
3525 /*
3526 * Ok set num_bytes to orig_bytes since we aren't
3527 * overocmmitted, this way we only try and reclaim what
3528 * we need.
3529 */
3530 num_bytes = orig_bytes;
3531 }
3532 } else {
3533 /*
3534 * Ok we're over committed, set num_bytes to the overcommitted
3535 * amount plus the amount of bytes that we need for this
3536 * reservation.
3537 */
3538 num_bytes = unused - space_info->total_bytes +
3539 (orig_bytes * (retries + 1));
3540 }
9ed74f2d 3541
8bb8ab2e
JB
3542 /*
3543 * Couldn't make our reservation, save our place so while we're trying
3544 * to reclaim space we can actually use it instead of somebody else
3545 * stealing it from us.
3546 */
3547 if (ret && !reserved) {
3548 space_info->bytes_reserved += orig_bytes;
3549 reserved = true;
3550 }
9ed74f2d 3551
f0486c68 3552 spin_unlock(&space_info->lock);
9ed74f2d 3553
8bb8ab2e
JB
3554 if (!ret)
3555 return 0;
9ed74f2d 3556
8bb8ab2e
JB
3557 if (!flush)
3558 goto out;
f0486c68 3559
8bb8ab2e
JB
3560 /*
3561 * We do synchronous shrinking since we don't actually unreserve
3562 * metadata until after the IO is completed.
3563 */
3564 ret = shrink_delalloc(trans, root, num_bytes, 1);
3565 if (ret > 0)
3566 return 0;
3567 else if (ret < 0)
3568 goto out;
f0486c68 3569
8bb8ab2e
JB
3570 /*
3571 * So if we were overcommitted it's possible that somebody else flushed
3572 * out enough space and we simply didn't have enough space to reclaim,
3573 * so go back around and try again.
3574 */
3575 if (retries < 2) {
3576 retries++;
3577 goto again;
3578 }
f0486c68
YZ
3579
3580 spin_lock(&space_info->lock);
8bb8ab2e
JB
3581 /*
3582 * Not enough space to be reclaimed, don't bother committing the
3583 * transaction.
3584 */
3585 if (space_info->bytes_pinned < orig_bytes)
3586 ret = -ENOSPC;
3587 spin_unlock(&space_info->lock);
3588 if (ret)
3589 goto out;
f0486c68 3590
8bb8ab2e 3591 ret = -EAGAIN;
38227933 3592 if (trans || committed)
8bb8ab2e 3593 goto out;
f0486c68 3594
8bb8ab2e 3595 ret = -ENOSPC;
7a7eaa40 3596 trans = btrfs_join_transaction(root);
8bb8ab2e
JB
3597 if (IS_ERR(trans))
3598 goto out;
3599 ret = btrfs_commit_transaction(trans, root);
38227933
JB
3600 if (!ret) {
3601 trans = NULL;
3602 committed = true;
8bb8ab2e 3603 goto again;
38227933 3604 }
8bb8ab2e
JB
3605
3606out:
3607 if (reserved) {
3608 spin_lock(&space_info->lock);
3609 space_info->bytes_reserved -= orig_bytes;
3610 spin_unlock(&space_info->lock);
f0486c68 3611 }
4e06bdd6 3612
f0486c68
YZ
3613 return ret;
3614}
3615
3616static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3617 struct btrfs_root *root)
3618{
3619 struct btrfs_block_rsv *block_rsv;
3620 if (root->ref_cows)
3621 block_rsv = trans->block_rsv;
3622 else
3623 block_rsv = root->block_rsv;
3624
3625 if (!block_rsv)
3626 block_rsv = &root->fs_info->empty_block_rsv;
3627
3628 return block_rsv;
3629}
3630
3631static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3632 u64 num_bytes)
3633{
3634 int ret = -ENOSPC;
3635 spin_lock(&block_rsv->lock);
3636 if (block_rsv->reserved >= num_bytes) {
3637 block_rsv->reserved -= num_bytes;
3638 if (block_rsv->reserved < block_rsv->size)
3639 block_rsv->full = 0;
3640 ret = 0;
3641 }
3642 spin_unlock(&block_rsv->lock);
3643 return ret;
3644}
3645
3646static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3647 u64 num_bytes, int update_size)
3648{
3649 spin_lock(&block_rsv->lock);
3650 block_rsv->reserved += num_bytes;
3651 if (update_size)
3652 block_rsv->size += num_bytes;
3653 else if (block_rsv->reserved >= block_rsv->size)
3654 block_rsv->full = 1;
3655 spin_unlock(&block_rsv->lock);
3656}
3657
3658void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3659 struct btrfs_block_rsv *dest, u64 num_bytes)
3660{
3661 struct btrfs_space_info *space_info = block_rsv->space_info;
3662
3663 spin_lock(&block_rsv->lock);
3664 if (num_bytes == (u64)-1)
3665 num_bytes = block_rsv->size;
3666 block_rsv->size -= num_bytes;
3667 if (block_rsv->reserved >= block_rsv->size) {
3668 num_bytes = block_rsv->reserved - block_rsv->size;
3669 block_rsv->reserved = block_rsv->size;
3670 block_rsv->full = 1;
3671 } else {
3672 num_bytes = 0;
3673 }
3674 spin_unlock(&block_rsv->lock);
3675
3676 if (num_bytes > 0) {
3677 if (dest) {
e9e22899
JB
3678 spin_lock(&dest->lock);
3679 if (!dest->full) {
3680 u64 bytes_to_add;
3681
3682 bytes_to_add = dest->size - dest->reserved;
3683 bytes_to_add = min(num_bytes, bytes_to_add);
3684 dest->reserved += bytes_to_add;
3685 if (dest->reserved >= dest->size)
3686 dest->full = 1;
3687 num_bytes -= bytes_to_add;
3688 }
3689 spin_unlock(&dest->lock);
3690 }
3691 if (num_bytes) {
f0486c68
YZ
3692 spin_lock(&space_info->lock);
3693 space_info->bytes_reserved -= num_bytes;
36e39c40 3694 space_info->reservation_progress++;
f0486c68 3695 spin_unlock(&space_info->lock);
4e06bdd6 3696 }
9ed74f2d 3697 }
f0486c68 3698}
4e06bdd6 3699
f0486c68
YZ
3700static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3701 struct btrfs_block_rsv *dst, u64 num_bytes)
3702{
3703 int ret;
9ed74f2d 3704
f0486c68
YZ
3705 ret = block_rsv_use_bytes(src, num_bytes);
3706 if (ret)
3707 return ret;
9ed74f2d 3708
f0486c68 3709 block_rsv_add_bytes(dst, num_bytes, 1);
9ed74f2d
JB
3710 return 0;
3711}
3712
f0486c68 3713void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
9ed74f2d 3714{
f0486c68
YZ
3715 memset(rsv, 0, sizeof(*rsv));
3716 spin_lock_init(&rsv->lock);
3717 atomic_set(&rsv->usage, 1);
3718 rsv->priority = 6;
3719 INIT_LIST_HEAD(&rsv->list);
3720}
3721
3722struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3723{
3724 struct btrfs_block_rsv *block_rsv;
3725 struct btrfs_fs_info *fs_info = root->fs_info;
9ed74f2d 3726
f0486c68
YZ
3727 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3728 if (!block_rsv)
3729 return NULL;
9ed74f2d 3730
f0486c68 3731 btrfs_init_block_rsv(block_rsv);
f0486c68
YZ
3732 block_rsv->space_info = __find_space_info(fs_info,
3733 BTRFS_BLOCK_GROUP_METADATA);
f0486c68
YZ
3734 return block_rsv;
3735}
9ed74f2d 3736
f0486c68
YZ
3737void btrfs_free_block_rsv(struct btrfs_root *root,
3738 struct btrfs_block_rsv *rsv)
3739{
3740 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3741 btrfs_block_rsv_release(root, rsv, (u64)-1);
3742 if (!rsv->durable)
3743 kfree(rsv);
3744 }
9ed74f2d
JB
3745}
3746
3747/*
f0486c68
YZ
3748 * make the block_rsv struct be able to capture freed space.
3749 * the captured space will re-add to the the block_rsv struct
3750 * after transaction commit
9ed74f2d 3751 */
f0486c68
YZ
3752void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3753 struct btrfs_block_rsv *block_rsv)
9ed74f2d 3754{
f0486c68
YZ
3755 block_rsv->durable = 1;
3756 mutex_lock(&fs_info->durable_block_rsv_mutex);
3757 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3758 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3759}
9ed74f2d 3760
f0486c68
YZ
3761int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3762 struct btrfs_root *root,
3763 struct btrfs_block_rsv *block_rsv,
8bb8ab2e 3764 u64 num_bytes)
f0486c68
YZ
3765{
3766 int ret;
9ed74f2d 3767
f0486c68
YZ
3768 if (num_bytes == 0)
3769 return 0;
8bb8ab2e
JB
3770
3771 ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
f0486c68
YZ
3772 if (!ret) {
3773 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3774 return 0;
3775 }
9ed74f2d 3776
f0486c68
YZ
3777 return ret;
3778}
9ed74f2d 3779
f0486c68
YZ
3780int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3781 struct btrfs_root *root,
3782 struct btrfs_block_rsv *block_rsv,
3783 u64 min_reserved, int min_factor)
3784{
3785 u64 num_bytes = 0;
3786 int commit_trans = 0;
3787 int ret = -ENOSPC;
9ed74f2d 3788
f0486c68
YZ
3789 if (!block_rsv)
3790 return 0;
9ed74f2d 3791
f0486c68
YZ
3792 spin_lock(&block_rsv->lock);
3793 if (min_factor > 0)
3794 num_bytes = div_factor(block_rsv->size, min_factor);
3795 if (min_reserved > num_bytes)
3796 num_bytes = min_reserved;
9ed74f2d 3797
f0486c68
YZ
3798 if (block_rsv->reserved >= num_bytes) {
3799 ret = 0;
3800 } else {
3801 num_bytes -= block_rsv->reserved;
3802 if (block_rsv->durable &&
3803 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3804 commit_trans = 1;
3805 }
3806 spin_unlock(&block_rsv->lock);
3807 if (!ret)
3808 return 0;
3809
3810 if (block_rsv->refill_used) {
8bb8ab2e
JB
3811 ret = reserve_metadata_bytes(trans, root, block_rsv,
3812 num_bytes, 0);
f0486c68
YZ
3813 if (!ret) {
3814 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3815 return 0;
4e06bdd6 3816 }
f0486c68 3817 }
9ed74f2d 3818
f0486c68
YZ
3819 if (commit_trans) {
3820 if (trans)
3821 return -EAGAIN;
3822
7a7eaa40 3823 trans = btrfs_join_transaction(root);
f0486c68
YZ
3824 BUG_ON(IS_ERR(trans));
3825 ret = btrfs_commit_transaction(trans, root);
3826 return 0;
6a63209f 3827 }
9ed74f2d 3828
f0486c68
YZ
3829 return -ENOSPC;
3830}
3831
3832int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3833 struct btrfs_block_rsv *dst_rsv,
3834 u64 num_bytes)
3835{
3836 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3837}
3838
3839void btrfs_block_rsv_release(struct btrfs_root *root,
3840 struct btrfs_block_rsv *block_rsv,
3841 u64 num_bytes)
3842{
3843 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3844 if (global_rsv->full || global_rsv == block_rsv ||
3845 block_rsv->space_info != global_rsv->space_info)
3846 global_rsv = NULL;
3847 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
6a63209f
JB
3848}
3849
3850/*
8929ecfa
YZ
3851 * helper to calculate size of global block reservation.
3852 * the desired value is sum of space used by extent tree,
3853 * checksum tree and root tree
6a63209f 3854 */
8929ecfa 3855static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
6a63209f 3856{
8929ecfa
YZ
3857 struct btrfs_space_info *sinfo;
3858 u64 num_bytes;
3859 u64 meta_used;
3860 u64 data_used;
3861 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3862#if 0
3863 /*
3864 * per tree used space accounting can be inaccuracy, so we
3865 * can't rely on it.
3866 */
3867 spin_lock(&fs_info->extent_root->accounting_lock);
3868 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3869 spin_unlock(&fs_info->extent_root->accounting_lock);
6a63209f 3870
8929ecfa
YZ
3871 spin_lock(&fs_info->csum_root->accounting_lock);
3872 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3873 spin_unlock(&fs_info->csum_root->accounting_lock);
6a63209f 3874
8929ecfa
YZ
3875 spin_lock(&fs_info->tree_root->accounting_lock);
3876 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3877 spin_unlock(&fs_info->tree_root->accounting_lock);
3878#endif
3879 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3880 spin_lock(&sinfo->lock);
3881 data_used = sinfo->bytes_used;
3882 spin_unlock(&sinfo->lock);
33b4d47f 3883
8929ecfa
YZ
3884 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3885 spin_lock(&sinfo->lock);
6d48755d
JB
3886 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3887 data_used = 0;
8929ecfa
YZ
3888 meta_used = sinfo->bytes_used;
3889 spin_unlock(&sinfo->lock);
ab6e2410 3890
8929ecfa
YZ
3891 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3892 csum_size * 2;
3893 num_bytes += div64_u64(data_used + meta_used, 50);
4e06bdd6 3894
8929ecfa
YZ
3895 if (num_bytes * 3 > meta_used)
3896 num_bytes = div64_u64(meta_used, 3);
ab6e2410 3897
8929ecfa
YZ
3898 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3899}
6a63209f 3900
8929ecfa
YZ
3901static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3902{
3903 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3904 struct btrfs_space_info *sinfo = block_rsv->space_info;
3905 u64 num_bytes;
6a63209f 3906
8929ecfa 3907 num_bytes = calc_global_metadata_size(fs_info);
33b4d47f 3908
8929ecfa
YZ
3909 spin_lock(&block_rsv->lock);
3910 spin_lock(&sinfo->lock);
4e06bdd6 3911
8929ecfa 3912 block_rsv->size = num_bytes;
4e06bdd6 3913
8929ecfa 3914 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
6d48755d
JB
3915 sinfo->bytes_reserved + sinfo->bytes_readonly +
3916 sinfo->bytes_may_use;
8929ecfa
YZ
3917
3918 if (sinfo->total_bytes > num_bytes) {
3919 num_bytes = sinfo->total_bytes - num_bytes;
3920 block_rsv->reserved += num_bytes;
3921 sinfo->bytes_reserved += num_bytes;
6a63209f 3922 }
6a63209f 3923
8929ecfa
YZ
3924 if (block_rsv->reserved >= block_rsv->size) {
3925 num_bytes = block_rsv->reserved - block_rsv->size;
3926 sinfo->bytes_reserved -= num_bytes;
36e39c40 3927 sinfo->reservation_progress++;
8929ecfa
YZ
3928 block_rsv->reserved = block_rsv->size;
3929 block_rsv->full = 1;
3930 }
3931#if 0
3932 printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3933 block_rsv->size, block_rsv->reserved);
3934#endif
3935 spin_unlock(&sinfo->lock);
3936 spin_unlock(&block_rsv->lock);
6a63209f
JB
3937}
3938
f0486c68 3939static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 3940{
f0486c68 3941 struct btrfs_space_info *space_info;
6a63209f 3942
f0486c68
YZ
3943 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3944 fs_info->chunk_block_rsv.space_info = space_info;
3945 fs_info->chunk_block_rsv.priority = 10;
6a63209f 3946
f0486c68 3947 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
8929ecfa
YZ
3948 fs_info->global_block_rsv.space_info = space_info;
3949 fs_info->global_block_rsv.priority = 10;
3950 fs_info->global_block_rsv.refill_used = 1;
3951 fs_info->delalloc_block_rsv.space_info = space_info;
f0486c68
YZ
3952 fs_info->trans_block_rsv.space_info = space_info;
3953 fs_info->empty_block_rsv.space_info = space_info;
3954 fs_info->empty_block_rsv.priority = 10;
3955
8929ecfa
YZ
3956 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3957 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3958 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3959 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
f0486c68 3960 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
8929ecfa
YZ
3961
3962 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3963
3964 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3965
3966 update_global_block_rsv(fs_info);
6a63209f
JB
3967}
3968
8929ecfa 3969static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 3970{
8929ecfa
YZ
3971 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3972 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3973 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3974 WARN_ON(fs_info->trans_block_rsv.size > 0);
3975 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3976 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3977 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
f0486c68 3978}
6a63209f 3979
a22285a6
YZ
3980static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3981{
3982 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3983 3 * num_items;
3984}
6a63209f 3985
fcb80c2a
JB
3986int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
3987 struct btrfs_root *root,
3988 struct btrfs_block_rsv *rsv)
3989{
3990 struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
3991 u64 num_bytes;
3992 int ret;
3993
3994 /*
3995 * Truncate should be freeing data, but give us 2 items just in case it
3996 * needs to use some space. We may want to be smarter about this in the
3997 * future.
3998 */
3999 num_bytes = calc_trans_metadata_size(root, 2);
4000
4001 /* We already have enough bytes, just return */
4002 if (rsv->reserved >= num_bytes)
4003 return 0;
4004
4005 num_bytes -= rsv->reserved;
4006
4007 /*
4008 * You should have reserved enough space before hand to do this, so this
4009 * should not fail.
4010 */
4011 ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
4012 BUG_ON(ret);
4013
4014 return 0;
4015}
4016
a22285a6
YZ
4017int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
4018 struct btrfs_root *root,
8bb8ab2e 4019 int num_items)
a22285a6
YZ
4020{
4021 u64 num_bytes;
4022 int ret;
6a63209f 4023
a22285a6
YZ
4024 if (num_items == 0 || root->fs_info->chunk_root == root)
4025 return 0;
6a63209f 4026
a22285a6
YZ
4027 num_bytes = calc_trans_metadata_size(root, num_items);
4028 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
8bb8ab2e 4029 num_bytes);
a22285a6
YZ
4030 if (!ret) {
4031 trans->bytes_reserved += num_bytes;
4032 trans->block_rsv = &root->fs_info->trans_block_rsv;
4033 }
4034 return ret;
6a63209f
JB
4035}
4036
a22285a6
YZ
4037void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4038 struct btrfs_root *root)
6a63209f 4039{
a22285a6
YZ
4040 if (!trans->bytes_reserved)
4041 return;
6a63209f 4042
a22285a6
YZ
4043 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
4044 btrfs_block_rsv_release(root, trans->block_rsv,
4045 trans->bytes_reserved);
4046 trans->bytes_reserved = 0;
4047}
6a63209f 4048
d68fc57b
YZ
4049int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4050 struct inode *inode)
4051{
4052 struct btrfs_root *root = BTRFS_I(inode)->root;
4053 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4054 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4055
4056 /*
fcb80c2a
JB
4057 * We need to hold space in order to delete our orphan item once we've
4058 * added it, so this takes the reservation so we can release it later
4059 * when we are truly done with the orphan item.
d68fc57b 4060 */
fcb80c2a 4061 u64 num_bytes = calc_trans_metadata_size(root, 1);
d68fc57b 4062 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
6a63209f
JB
4063}
4064
d68fc57b 4065void btrfs_orphan_release_metadata(struct inode *inode)
97e728d4 4066{
d68fc57b 4067 struct btrfs_root *root = BTRFS_I(inode)->root;
fcb80c2a 4068 u64 num_bytes = calc_trans_metadata_size(root, 1);
d68fc57b
YZ
4069 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4070}
97e728d4 4071
a22285a6
YZ
4072int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4073 struct btrfs_pending_snapshot *pending)
4074{
4075 struct btrfs_root *root = pending->root;
4076 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4077 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4078 /*
4079 * two for root back/forward refs, two for directory entries
4080 * and one for root of the snapshot.
4081 */
4082 u64 num_bytes = calc_trans_metadata_size(root, 5);
4083 dst_rsv->space_info = src_rsv->space_info;
4084 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
97e728d4
JB
4085}
4086
0ca1f7ce 4087static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
6324fbf3 4088{
0ca1f7ce
YZ
4089 return num_bytes >>= 3;
4090}
c146afad 4091
0ca1f7ce
YZ
4092int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4093{
4094 struct btrfs_root *root = BTRFS_I(inode)->root;
4095 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4096 u64 to_reserve;
4097 int nr_extents;
57a45ced 4098 int reserved_extents;
0ca1f7ce 4099 int ret;
6324fbf3 4100
0ca1f7ce
YZ
4101 if (btrfs_transaction_in_commit(root->fs_info))
4102 schedule_timeout(1);
ec44a35c 4103
0ca1f7ce 4104 num_bytes = ALIGN(num_bytes, root->sectorsize);
8bb8ab2e 4105
0ca1f7ce 4106 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
57a45ced
JB
4107 reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4108
4109 if (nr_extents > reserved_extents) {
4110 nr_extents -= reserved_extents;
0ca1f7ce
YZ
4111 to_reserve = calc_trans_metadata_size(root, nr_extents);
4112 } else {
4113 nr_extents = 0;
4114 to_reserve = 0;
593060d7 4115 }
57a45ced 4116
0ca1f7ce 4117 to_reserve += calc_csum_metadata_size(inode, num_bytes);
8bb8ab2e
JB
4118 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4119 if (ret)
0ca1f7ce 4120 return ret;
6324fbf3 4121
57a45ced 4122 atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
0ca1f7ce 4123 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
25179201 4124
0ca1f7ce
YZ
4125 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4126
4127 if (block_rsv->size > 512 * 1024 * 1024)
0019f10d 4128 shrink_delalloc(NULL, root, to_reserve, 0);
0ca1f7ce
YZ
4129
4130 return 0;
4131}
4132
4133void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4134{
4135 struct btrfs_root *root = BTRFS_I(inode)->root;
4136 u64 to_free;
4137 int nr_extents;
57a45ced 4138 int reserved_extents;
0ca1f7ce
YZ
4139
4140 num_bytes = ALIGN(num_bytes, root->sectorsize);
4141 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
3c14874a 4142 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
0ca1f7ce 4143
57a45ced
JB
4144 reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4145 do {
4146 int old, new;
4147
4148 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4149 if (nr_extents >= reserved_extents) {
4150 nr_extents = 0;
4151 break;
4152 }
4153 old = reserved_extents;
4154 nr_extents = reserved_extents - nr_extents;
4155 new = reserved_extents - nr_extents;
4156 old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
4157 reserved_extents, new);
4158 if (likely(old == reserved_extents))
4159 break;
4160 reserved_extents = old;
4161 } while (1);
97e728d4 4162
0ca1f7ce
YZ
4163 to_free = calc_csum_metadata_size(inode, num_bytes);
4164 if (nr_extents > 0)
4165 to_free += calc_trans_metadata_size(root, nr_extents);
4166
4167 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4168 to_free);
4169}
4170
4171int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4172{
4173 int ret;
4174
4175 ret = btrfs_check_data_free_space(inode, num_bytes);
d397712b 4176 if (ret)
0ca1f7ce
YZ
4177 return ret;
4178
4179 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4180 if (ret) {
4181 btrfs_free_reserved_data_space(inode, num_bytes);
4182 return ret;
4183 }
4184
4185 return 0;
4186}
4187
4188void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4189{
4190 btrfs_delalloc_release_metadata(inode, num_bytes);
4191 btrfs_free_reserved_data_space(inode, num_bytes);
6324fbf3
CM
4192}
4193
9078a3e1
CM
4194static int update_block_group(struct btrfs_trans_handle *trans,
4195 struct btrfs_root *root,
f0486c68 4196 u64 bytenr, u64 num_bytes, int alloc)
9078a3e1 4197{
0af3d00b 4198 struct btrfs_block_group_cache *cache = NULL;
9078a3e1 4199 struct btrfs_fs_info *info = root->fs_info;
db94535d 4200 u64 total = num_bytes;
9078a3e1 4201 u64 old_val;
db94535d 4202 u64 byte_in_group;
0af3d00b 4203 int factor;
3e1ad54f 4204
5d4f98a2
YZ
4205 /* block accounting for super block */
4206 spin_lock(&info->delalloc_lock);
4207 old_val = btrfs_super_bytes_used(&info->super_copy);
4208 if (alloc)
4209 old_val += num_bytes;
4210 else
4211 old_val -= num_bytes;
4212 btrfs_set_super_bytes_used(&info->super_copy, old_val);
5d4f98a2
YZ
4213 spin_unlock(&info->delalloc_lock);
4214
d397712b 4215 while (total) {
db94535d 4216 cache = btrfs_lookup_block_group(info, bytenr);
f3465ca4 4217 if (!cache)
9078a3e1 4218 return -1;
b742bb82
YZ
4219 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4220 BTRFS_BLOCK_GROUP_RAID1 |
4221 BTRFS_BLOCK_GROUP_RAID10))
4222 factor = 2;
4223 else
4224 factor = 1;
9d66e233
JB
4225 /*
4226 * If this block group has free space cache written out, we
4227 * need to make sure to load it if we are removing space. This
4228 * is because we need the unpinning stage to actually add the
4229 * space back to the block group, otherwise we will leak space.
4230 */
4231 if (!alloc && cache->cached == BTRFS_CACHE_NO)
b8399dee 4232 cache_block_group(cache, trans, NULL, 1);
0af3d00b 4233
db94535d
CM
4234 byte_in_group = bytenr - cache->key.objectid;
4235 WARN_ON(byte_in_group > cache->key.offset);
9078a3e1 4236
25179201 4237 spin_lock(&cache->space_info->lock);
c286ac48 4238 spin_lock(&cache->lock);
0af3d00b
JB
4239
4240 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4241 cache->disk_cache_state < BTRFS_DC_CLEAR)
4242 cache->disk_cache_state = BTRFS_DC_CLEAR;
4243
0f9dd46c 4244 cache->dirty = 1;
9078a3e1 4245 old_val = btrfs_block_group_used(&cache->item);
db94535d 4246 num_bytes = min(total, cache->key.offset - byte_in_group);
cd1bc465 4247 if (alloc) {
db94535d 4248 old_val += num_bytes;
11833d66
YZ
4249 btrfs_set_block_group_used(&cache->item, old_val);
4250 cache->reserved -= num_bytes;
11833d66 4251 cache->space_info->bytes_reserved -= num_bytes;
36e39c40 4252 cache->space_info->reservation_progress++;
b742bb82
YZ
4253 cache->space_info->bytes_used += num_bytes;
4254 cache->space_info->disk_used += num_bytes * factor;
c286ac48 4255 spin_unlock(&cache->lock);
25179201 4256 spin_unlock(&cache->space_info->lock);
cd1bc465 4257 } else {
db94535d 4258 old_val -= num_bytes;
c286ac48 4259 btrfs_set_block_group_used(&cache->item, old_val);
f0486c68
YZ
4260 cache->pinned += num_bytes;
4261 cache->space_info->bytes_pinned += num_bytes;
6324fbf3 4262 cache->space_info->bytes_used -= num_bytes;
b742bb82 4263 cache->space_info->disk_used -= num_bytes * factor;
c286ac48 4264 spin_unlock(&cache->lock);
25179201 4265 spin_unlock(&cache->space_info->lock);
1f3c79a2 4266
f0486c68
YZ
4267 set_extent_dirty(info->pinned_extents,
4268 bytenr, bytenr + num_bytes - 1,
4269 GFP_NOFS | __GFP_NOFAIL);
cd1bc465 4270 }
fa9c0d79 4271 btrfs_put_block_group(cache);
db94535d
CM
4272 total -= num_bytes;
4273 bytenr += num_bytes;
9078a3e1
CM
4274 }
4275 return 0;
4276}
6324fbf3 4277
a061fc8d
CM
4278static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4279{
0f9dd46c 4280 struct btrfs_block_group_cache *cache;
d2fb3437 4281 u64 bytenr;
0f9dd46c
JB
4282
4283 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4284 if (!cache)
a061fc8d 4285 return 0;
0f9dd46c 4286
d2fb3437 4287 bytenr = cache->key.objectid;
fa9c0d79 4288 btrfs_put_block_group(cache);
d2fb3437
YZ
4289
4290 return bytenr;
a061fc8d
CM
4291}
4292
f0486c68
YZ
4293static int pin_down_extent(struct btrfs_root *root,
4294 struct btrfs_block_group_cache *cache,
4295 u64 bytenr, u64 num_bytes, int reserved)
324ae4df 4296{
11833d66
YZ
4297 spin_lock(&cache->space_info->lock);
4298 spin_lock(&cache->lock);
4299 cache->pinned += num_bytes;
4300 cache->space_info->bytes_pinned += num_bytes;
4301 if (reserved) {
4302 cache->reserved -= num_bytes;
4303 cache->space_info->bytes_reserved -= num_bytes;
36e39c40 4304 cache->space_info->reservation_progress++;
11833d66
YZ
4305 }
4306 spin_unlock(&cache->lock);
4307 spin_unlock(&cache->space_info->lock);
68b38550 4308
f0486c68
YZ
4309 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4310 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4311 return 0;
4312}
68b38550 4313
f0486c68
YZ
4314/*
4315 * this function must be called within transaction
4316 */
4317int btrfs_pin_extent(struct btrfs_root *root,
4318 u64 bytenr, u64 num_bytes, int reserved)
4319{
4320 struct btrfs_block_group_cache *cache;
68b38550 4321
f0486c68
YZ
4322 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4323 BUG_ON(!cache);
4324
4325 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4326
4327 btrfs_put_block_group(cache);
11833d66
YZ
4328 return 0;
4329}
4330
f0486c68
YZ
4331/*
4332 * update size of reserved extents. this function may return -EAGAIN
4333 * if 'reserve' is true or 'sinfo' is false.
4334 */
b4d00d56
LD
4335int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4336 u64 num_bytes, int reserve, int sinfo)
11833d66 4337{
f0486c68
YZ
4338 int ret = 0;
4339 if (sinfo) {
4340 struct btrfs_space_info *space_info = cache->space_info;
4341 spin_lock(&space_info->lock);
4342 spin_lock(&cache->lock);
4343 if (reserve) {
4344 if (cache->ro) {
4345 ret = -EAGAIN;
4346 } else {
4347 cache->reserved += num_bytes;
4348 space_info->bytes_reserved += num_bytes;
4349 }
4350 } else {
4351 if (cache->ro)
4352 space_info->bytes_readonly += num_bytes;
4353 cache->reserved -= num_bytes;
4354 space_info->bytes_reserved -= num_bytes;
36e39c40 4355 space_info->reservation_progress++;
f0486c68
YZ
4356 }
4357 spin_unlock(&cache->lock);
4358 spin_unlock(&space_info->lock);
11833d66 4359 } else {
f0486c68
YZ
4360 spin_lock(&cache->lock);
4361 if (cache->ro) {
4362 ret = -EAGAIN;
4363 } else {
4364 if (reserve)
4365 cache->reserved += num_bytes;
4366 else
4367 cache->reserved -= num_bytes;
4368 }
4369 spin_unlock(&cache->lock);
324ae4df 4370 }
f0486c68 4371 return ret;
324ae4df 4372}
9078a3e1 4373
11833d66
YZ
4374int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4375 struct btrfs_root *root)
e8569813 4376{
e8569813 4377 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66
YZ
4378 struct btrfs_caching_control *next;
4379 struct btrfs_caching_control *caching_ctl;
4380 struct btrfs_block_group_cache *cache;
e8569813 4381
11833d66 4382 down_write(&fs_info->extent_commit_sem);
25179201 4383
11833d66
YZ
4384 list_for_each_entry_safe(caching_ctl, next,
4385 &fs_info->caching_block_groups, list) {
4386 cache = caching_ctl->block_group;
4387 if (block_group_cache_done(cache)) {
4388 cache->last_byte_to_unpin = (u64)-1;
4389 list_del_init(&caching_ctl->list);
4390 put_caching_control(caching_ctl);
e8569813 4391 } else {
11833d66 4392 cache->last_byte_to_unpin = caching_ctl->progress;
e8569813 4393 }
e8569813 4394 }
11833d66
YZ
4395
4396 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4397 fs_info->pinned_extents = &fs_info->freed_extents[1];
4398 else
4399 fs_info->pinned_extents = &fs_info->freed_extents[0];
4400
4401 up_write(&fs_info->extent_commit_sem);
8929ecfa
YZ
4402
4403 update_global_block_rsv(fs_info);
e8569813
ZY
4404 return 0;
4405}
4406
11833d66 4407static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
ccd467d6 4408{
11833d66
YZ
4409 struct btrfs_fs_info *fs_info = root->fs_info;
4410 struct btrfs_block_group_cache *cache = NULL;
4411 u64 len;
ccd467d6 4412
11833d66
YZ
4413 while (start <= end) {
4414 if (!cache ||
4415 start >= cache->key.objectid + cache->key.offset) {
4416 if (cache)
4417 btrfs_put_block_group(cache);
4418 cache = btrfs_lookup_block_group(fs_info, start);
4419 BUG_ON(!cache);
4420 }
4421
4422 len = cache->key.objectid + cache->key.offset - start;
4423 len = min(len, end + 1 - start);
4424
4425 if (start < cache->last_byte_to_unpin) {
4426 len = min(len, cache->last_byte_to_unpin - start);
4427 btrfs_add_free_space(cache, start, len);
4428 }
4429
f0486c68
YZ
4430 start += len;
4431
11833d66
YZ
4432 spin_lock(&cache->space_info->lock);
4433 spin_lock(&cache->lock);
4434 cache->pinned -= len;
4435 cache->space_info->bytes_pinned -= len;
f0486c68
YZ
4436 if (cache->ro) {
4437 cache->space_info->bytes_readonly += len;
4438 } else if (cache->reserved_pinned > 0) {
4439 len = min(len, cache->reserved_pinned);
4440 cache->reserved_pinned -= len;
4441 cache->space_info->bytes_reserved += len;
4442 }
11833d66
YZ
4443 spin_unlock(&cache->lock);
4444 spin_unlock(&cache->space_info->lock);
ccd467d6 4445 }
11833d66
YZ
4446
4447 if (cache)
4448 btrfs_put_block_group(cache);
ccd467d6
CM
4449 return 0;
4450}
4451
4452int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
11833d66 4453 struct btrfs_root *root)
a28ec197 4454{
11833d66
YZ
4455 struct btrfs_fs_info *fs_info = root->fs_info;
4456 struct extent_io_tree *unpin;
f0486c68
YZ
4457 struct btrfs_block_rsv *block_rsv;
4458 struct btrfs_block_rsv *next_rsv;
1a5bc167
CM
4459 u64 start;
4460 u64 end;
f0486c68 4461 int idx;
a28ec197 4462 int ret;
a28ec197 4463
11833d66
YZ
4464 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4465 unpin = &fs_info->freed_extents[1];
4466 else
4467 unpin = &fs_info->freed_extents[0];
4468
d397712b 4469 while (1) {
1a5bc167
CM
4470 ret = find_first_extent_bit(unpin, 0, &start, &end,
4471 EXTENT_DIRTY);
4472 if (ret)
a28ec197 4473 break;
1f3c79a2 4474
5378e607
LD
4475 if (btrfs_test_opt(root, DISCARD))
4476 ret = btrfs_discard_extent(root, start,
4477 end + 1 - start, NULL);
1f3c79a2 4478
1a5bc167 4479 clear_extent_dirty(unpin, start, end, GFP_NOFS);
11833d66 4480 unpin_extent_range(root, start, end);
b9473439 4481 cond_resched();
a28ec197 4482 }
817d52f8 4483
f0486c68
YZ
4484 mutex_lock(&fs_info->durable_block_rsv_mutex);
4485 list_for_each_entry_safe(block_rsv, next_rsv,
4486 &fs_info->durable_block_rsv_list, list) {
444528b3 4487
f0486c68
YZ
4488 idx = trans->transid & 0x1;
4489 if (block_rsv->freed[idx] > 0) {
4490 block_rsv_add_bytes(block_rsv,
4491 block_rsv->freed[idx], 0);
4492 block_rsv->freed[idx] = 0;
4493 }
4494 if (atomic_read(&block_rsv->usage) == 0) {
4495 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
31840ae1 4496
f0486c68
YZ
4497 if (block_rsv->freed[0] == 0 &&
4498 block_rsv->freed[1] == 0) {
4499 list_del_init(&block_rsv->list);
4500 kfree(block_rsv);
4501 }
4502 } else {
4503 btrfs_block_rsv_release(root, block_rsv, 0);
8ef97622 4504 }
f4b9aa8d 4505 }
f0486c68 4506 mutex_unlock(&fs_info->durable_block_rsv_mutex);
31840ae1 4507
e20d96d6
CM
4508 return 0;
4509}
4510
5d4f98a2
YZ
4511static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4512 struct btrfs_root *root,
4513 u64 bytenr, u64 num_bytes, u64 parent,
4514 u64 root_objectid, u64 owner_objectid,
4515 u64 owner_offset, int refs_to_drop,
4516 struct btrfs_delayed_extent_op *extent_op)
a28ec197 4517{
e2fa7227 4518 struct btrfs_key key;
5d4f98a2 4519 struct btrfs_path *path;
1261ec42
CM
4520 struct btrfs_fs_info *info = root->fs_info;
4521 struct btrfs_root *extent_root = info->extent_root;
5f39d397 4522 struct extent_buffer *leaf;
5d4f98a2
YZ
4523 struct btrfs_extent_item *ei;
4524 struct btrfs_extent_inline_ref *iref;
a28ec197 4525 int ret;
5d4f98a2 4526 int is_data;
952fccac
CM
4527 int extent_slot = 0;
4528 int found_extent = 0;
4529 int num_to_del = 1;
5d4f98a2
YZ
4530 u32 item_size;
4531 u64 refs;
037e6390 4532
5caf2a00 4533 path = btrfs_alloc_path();
54aa1f4d
CM
4534 if (!path)
4535 return -ENOMEM;
5f26f772 4536
3c12ac72 4537 path->reada = 1;
b9473439 4538 path->leave_spinning = 1;
5d4f98a2
YZ
4539
4540 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4541 BUG_ON(!is_data && refs_to_drop != 1);
4542
4543 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4544 bytenr, num_bytes, parent,
4545 root_objectid, owner_objectid,
4546 owner_offset);
7bb86316 4547 if (ret == 0) {
952fccac 4548 extent_slot = path->slots[0];
5d4f98a2
YZ
4549 while (extent_slot >= 0) {
4550 btrfs_item_key_to_cpu(path->nodes[0], &key,
952fccac 4551 extent_slot);
5d4f98a2 4552 if (key.objectid != bytenr)
952fccac 4553 break;
5d4f98a2
YZ
4554 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4555 key.offset == num_bytes) {
952fccac
CM
4556 found_extent = 1;
4557 break;
4558 }
4559 if (path->slots[0] - extent_slot > 5)
4560 break;
5d4f98a2 4561 extent_slot--;
952fccac 4562 }
5d4f98a2
YZ
4563#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4564 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4565 if (found_extent && item_size < sizeof(*ei))
4566 found_extent = 0;
4567#endif
31840ae1 4568 if (!found_extent) {
5d4f98a2 4569 BUG_ON(iref);
56bec294 4570 ret = remove_extent_backref(trans, extent_root, path,
5d4f98a2
YZ
4571 NULL, refs_to_drop,
4572 is_data);
31840ae1
ZY
4573 BUG_ON(ret);
4574 btrfs_release_path(extent_root, path);
b9473439 4575 path->leave_spinning = 1;
5d4f98a2
YZ
4576
4577 key.objectid = bytenr;
4578 key.type = BTRFS_EXTENT_ITEM_KEY;
4579 key.offset = num_bytes;
4580
31840ae1
ZY
4581 ret = btrfs_search_slot(trans, extent_root,
4582 &key, path, -1, 1);
f3465ca4
JB
4583 if (ret) {
4584 printk(KERN_ERR "umm, got %d back from search"
d397712b
CM
4585 ", was looking for %llu\n", ret,
4586 (unsigned long long)bytenr);
f3465ca4
JB
4587 btrfs_print_leaf(extent_root, path->nodes[0]);
4588 }
31840ae1
ZY
4589 BUG_ON(ret);
4590 extent_slot = path->slots[0];
4591 }
7bb86316
CM
4592 } else {
4593 btrfs_print_leaf(extent_root, path->nodes[0]);
4594 WARN_ON(1);
d397712b 4595 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5d4f98a2 4596 "parent %llu root %llu owner %llu offset %llu\n",
d397712b 4597 (unsigned long long)bytenr,
56bec294 4598 (unsigned long long)parent,
d397712b 4599 (unsigned long long)root_objectid,
5d4f98a2
YZ
4600 (unsigned long long)owner_objectid,
4601 (unsigned long long)owner_offset);
7bb86316 4602 }
5f39d397
CM
4603
4604 leaf = path->nodes[0];
5d4f98a2
YZ
4605 item_size = btrfs_item_size_nr(leaf, extent_slot);
4606#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4607 if (item_size < sizeof(*ei)) {
4608 BUG_ON(found_extent || extent_slot != path->slots[0]);
4609 ret = convert_extent_item_v0(trans, extent_root, path,
4610 owner_objectid, 0);
4611 BUG_ON(ret < 0);
4612
4613 btrfs_release_path(extent_root, path);
4614 path->leave_spinning = 1;
4615
4616 key.objectid = bytenr;
4617 key.type = BTRFS_EXTENT_ITEM_KEY;
4618 key.offset = num_bytes;
4619
4620 ret = btrfs_search_slot(trans, extent_root, &key, path,
4621 -1, 1);
4622 if (ret) {
4623 printk(KERN_ERR "umm, got %d back from search"
4624 ", was looking for %llu\n", ret,
4625 (unsigned long long)bytenr);
4626 btrfs_print_leaf(extent_root, path->nodes[0]);
4627 }
4628 BUG_ON(ret);
4629 extent_slot = path->slots[0];
4630 leaf = path->nodes[0];
4631 item_size = btrfs_item_size_nr(leaf, extent_slot);
4632 }
4633#endif
4634 BUG_ON(item_size < sizeof(*ei));
952fccac 4635 ei = btrfs_item_ptr(leaf, extent_slot,
123abc88 4636 struct btrfs_extent_item);
5d4f98a2
YZ
4637 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4638 struct btrfs_tree_block_info *bi;
4639 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4640 bi = (struct btrfs_tree_block_info *)(ei + 1);
4641 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4642 }
56bec294 4643
5d4f98a2 4644 refs = btrfs_extent_refs(leaf, ei);
56bec294
CM
4645 BUG_ON(refs < refs_to_drop);
4646 refs -= refs_to_drop;
5f39d397 4647
5d4f98a2
YZ
4648 if (refs > 0) {
4649 if (extent_op)
4650 __run_delayed_extent_op(extent_op, leaf, ei);
4651 /*
4652 * In the case of inline back ref, reference count will
4653 * be updated by remove_extent_backref
952fccac 4654 */
5d4f98a2
YZ
4655 if (iref) {
4656 BUG_ON(!found_extent);
4657 } else {
4658 btrfs_set_extent_refs(leaf, ei, refs);
4659 btrfs_mark_buffer_dirty(leaf);
4660 }
4661 if (found_extent) {
4662 ret = remove_extent_backref(trans, extent_root, path,
4663 iref, refs_to_drop,
4664 is_data);
952fccac
CM
4665 BUG_ON(ret);
4666 }
5d4f98a2 4667 } else {
5d4f98a2
YZ
4668 if (found_extent) {
4669 BUG_ON(is_data && refs_to_drop !=
4670 extent_data_ref_count(root, path, iref));
4671 if (iref) {
4672 BUG_ON(path->slots[0] != extent_slot);
4673 } else {
4674 BUG_ON(path->slots[0] != extent_slot + 1);
4675 path->slots[0] = extent_slot;
4676 num_to_del = 2;
4677 }
78fae27e 4678 }
b9473439 4679
952fccac
CM
4680 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4681 num_to_del);
31840ae1 4682 BUG_ON(ret);
25179201 4683 btrfs_release_path(extent_root, path);
21af804c 4684
5d4f98a2 4685 if (is_data) {
459931ec
CM
4686 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4687 BUG_ON(ret);
d57e62b8
CM
4688 } else {
4689 invalidate_mapping_pages(info->btree_inode->i_mapping,
4690 bytenr >> PAGE_CACHE_SHIFT,
4691 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
459931ec
CM
4692 }
4693
f0486c68 4694 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
dcbdd4dc 4695 BUG_ON(ret);
a28ec197 4696 }
5caf2a00 4697 btrfs_free_path(path);
a28ec197
CM
4698 return ret;
4699}
4700
1887be66 4701/*
f0486c68 4702 * when we free an block, it is possible (and likely) that we free the last
1887be66
CM
4703 * delayed ref for that extent as well. This searches the delayed ref tree for
4704 * a given extent, and if there are no other delayed refs to be processed, it
4705 * removes it from the tree.
4706 */
4707static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4708 struct btrfs_root *root, u64 bytenr)
4709{
4710 struct btrfs_delayed_ref_head *head;
4711 struct btrfs_delayed_ref_root *delayed_refs;
4712 struct btrfs_delayed_ref_node *ref;
4713 struct rb_node *node;
f0486c68 4714 int ret = 0;
1887be66
CM
4715
4716 delayed_refs = &trans->transaction->delayed_refs;
4717 spin_lock(&delayed_refs->lock);
4718 head = btrfs_find_delayed_ref_head(trans, bytenr);
4719 if (!head)
4720 goto out;
4721
4722 node = rb_prev(&head->node.rb_node);
4723 if (!node)
4724 goto out;
4725
4726 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4727
4728 /* there are still entries for this ref, we can't drop it */
4729 if (ref->bytenr == bytenr)
4730 goto out;
4731
5d4f98a2
YZ
4732 if (head->extent_op) {
4733 if (!head->must_insert_reserved)
4734 goto out;
4735 kfree(head->extent_op);
4736 head->extent_op = NULL;
4737 }
4738
1887be66
CM
4739 /*
4740 * waiting for the lock here would deadlock. If someone else has it
4741 * locked they are already in the process of dropping it anyway
4742 */
4743 if (!mutex_trylock(&head->mutex))
4744 goto out;
4745
4746 /*
4747 * at this point we have a head with no other entries. Go
4748 * ahead and process it.
4749 */
4750 head->node.in_tree = 0;
4751 rb_erase(&head->node.rb_node, &delayed_refs->root);
c3e69d58 4752
1887be66
CM
4753 delayed_refs->num_entries--;
4754
4755 /*
4756 * we don't take a ref on the node because we're removing it from the
4757 * tree, so we just steal the ref the tree was holding.
4758 */
c3e69d58
CM
4759 delayed_refs->num_heads--;
4760 if (list_empty(&head->cluster))
4761 delayed_refs->num_heads_ready--;
4762
4763 list_del_init(&head->cluster);
1887be66
CM
4764 spin_unlock(&delayed_refs->lock);
4765
f0486c68
YZ
4766 BUG_ON(head->extent_op);
4767 if (head->must_insert_reserved)
4768 ret = 1;
4769
4770 mutex_unlock(&head->mutex);
1887be66 4771 btrfs_put_delayed_ref(&head->node);
f0486c68 4772 return ret;
1887be66
CM
4773out:
4774 spin_unlock(&delayed_refs->lock);
4775 return 0;
4776}
4777
f0486c68
YZ
4778void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4779 struct btrfs_root *root,
4780 struct extent_buffer *buf,
4781 u64 parent, int last_ref)
4782{
4783 struct btrfs_block_rsv *block_rsv;
4784 struct btrfs_block_group_cache *cache = NULL;
4785 int ret;
4786
4787 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4788 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4789 parent, root->root_key.objectid,
4790 btrfs_header_level(buf),
4791 BTRFS_DROP_DELAYED_REF, NULL);
4792 BUG_ON(ret);
4793 }
4794
4795 if (!last_ref)
4796 return;
4797
4798 block_rsv = get_block_rsv(trans, root);
4799 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
3bf84a5a
YZ
4800 if (block_rsv->space_info != cache->space_info)
4801 goto out;
f0486c68
YZ
4802
4803 if (btrfs_header_generation(buf) == trans->transid) {
4804 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4805 ret = check_ref_cleanup(trans, root, buf->start);
4806 if (!ret)
4807 goto pin;
4808 }
4809
4810 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4811 pin_down_extent(root, cache, buf->start, buf->len, 1);
4812 goto pin;
4813 }
4814
4815 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4816
4817 btrfs_add_free_space(cache, buf->start, buf->len);
b4d00d56 4818 ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
f0486c68
YZ
4819 if (ret == -EAGAIN) {
4820 /* block group became read-only */
b4d00d56 4821 btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
f0486c68
YZ
4822 goto out;
4823 }
4824
4825 ret = 1;
4826 spin_lock(&block_rsv->lock);
4827 if (block_rsv->reserved < block_rsv->size) {
4828 block_rsv->reserved += buf->len;
4829 ret = 0;
4830 }
4831 spin_unlock(&block_rsv->lock);
4832
4833 if (ret) {
4834 spin_lock(&cache->space_info->lock);
4835 cache->space_info->bytes_reserved -= buf->len;
36e39c40 4836 cache->space_info->reservation_progress++;
f0486c68
YZ
4837 spin_unlock(&cache->space_info->lock);
4838 }
4839 goto out;
4840 }
4841pin:
4842 if (block_rsv->durable && !cache->ro) {
4843 ret = 0;
4844 spin_lock(&cache->lock);
4845 if (!cache->ro) {
4846 cache->reserved_pinned += buf->len;
4847 ret = 1;
4848 }
4849 spin_unlock(&cache->lock);
4850
4851 if (ret) {
4852 spin_lock(&block_rsv->lock);
4853 block_rsv->freed[trans->transid & 0x1] += buf->len;
4854 spin_unlock(&block_rsv->lock);
4855 }
4856 }
4857out:
a826d6dc
JB
4858 /*
4859 * Deleting the buffer, clear the corrupt flag since it doesn't matter
4860 * anymore.
4861 */
4862 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
f0486c68
YZ
4863 btrfs_put_block_group(cache);
4864}
4865
925baedd 4866int btrfs_free_extent(struct btrfs_trans_handle *trans,
31840ae1
ZY
4867 struct btrfs_root *root,
4868 u64 bytenr, u64 num_bytes, u64 parent,
5d4f98a2 4869 u64 root_objectid, u64 owner, u64 offset)
925baedd
CM
4870{
4871 int ret;
4872
56bec294
CM
4873 /*
4874 * tree log blocks never actually go into the extent allocation
4875 * tree, just update pinning info and exit early.
56bec294 4876 */
5d4f98a2
YZ
4877 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4878 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
b9473439 4879 /* unlocks the pinned mutex */
11833d66 4880 btrfs_pin_extent(root, bytenr, num_bytes, 1);
56bec294 4881 ret = 0;
5d4f98a2
YZ
4882 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4883 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4884 parent, root_objectid, (int)owner,
4885 BTRFS_DROP_DELAYED_REF, NULL);
1887be66 4886 BUG_ON(ret);
5d4f98a2
YZ
4887 } else {
4888 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4889 parent, root_objectid, owner,
4890 offset, BTRFS_DROP_DELAYED_REF, NULL);
4891 BUG_ON(ret);
56bec294 4892 }
925baedd
CM
4893 return ret;
4894}
4895
87ee04eb
CM
4896static u64 stripe_align(struct btrfs_root *root, u64 val)
4897{
4898 u64 mask = ((u64)root->stripesize - 1);
4899 u64 ret = (val + mask) & ~mask;
4900 return ret;
4901}
4902
817d52f8
JB
4903/*
4904 * when we wait for progress in the block group caching, its because
4905 * our allocation attempt failed at least once. So, we must sleep
4906 * and let some progress happen before we try again.
4907 *
4908 * This function will sleep at least once waiting for new free space to
4909 * show up, and then it will check the block group free space numbers
4910 * for our min num_bytes. Another option is to have it go ahead
4911 * and look in the rbtree for a free extent of a given size, but this
4912 * is a good start.
4913 */
4914static noinline int
4915wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4916 u64 num_bytes)
4917{
11833d66 4918 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
4919 DEFINE_WAIT(wait);
4920
11833d66
YZ
4921 caching_ctl = get_caching_control(cache);
4922 if (!caching_ctl)
817d52f8 4923 return 0;
817d52f8 4924
11833d66 4925 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
817d52f8 4926 (cache->free_space >= num_bytes));
11833d66
YZ
4927
4928 put_caching_control(caching_ctl);
4929 return 0;
4930}
4931
4932static noinline int
4933wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4934{
4935 struct btrfs_caching_control *caching_ctl;
4936 DEFINE_WAIT(wait);
4937
4938 caching_ctl = get_caching_control(cache);
4939 if (!caching_ctl)
4940 return 0;
4941
4942 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4943
4944 put_caching_control(caching_ctl);
817d52f8
JB
4945 return 0;
4946}
4947
b742bb82
YZ
4948static int get_block_group_index(struct btrfs_block_group_cache *cache)
4949{
4950 int index;
4951 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4952 index = 0;
4953 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4954 index = 1;
4955 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4956 index = 2;
4957 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4958 index = 3;
4959 else
4960 index = 4;
4961 return index;
4962}
4963
817d52f8 4964enum btrfs_loop_type {
ccf0e725 4965 LOOP_FIND_IDEAL = 0,
817d52f8
JB
4966 LOOP_CACHING_NOWAIT = 1,
4967 LOOP_CACHING_WAIT = 2,
4968 LOOP_ALLOC_CHUNK = 3,
4969 LOOP_NO_EMPTY_SIZE = 4,
4970};
4971
fec577fb
CM
4972/*
4973 * walks the btree of allocated extents and find a hole of a given size.
4974 * The key ins is changed to record the hole:
4975 * ins->objectid == block start
62e2749e 4976 * ins->flags = BTRFS_EXTENT_ITEM_KEY
fec577fb
CM
4977 * ins->offset == number of blocks
4978 * Any available blocks before search_start are skipped.
4979 */
d397712b 4980static noinline int find_free_extent(struct btrfs_trans_handle *trans,
98ed5174
CM
4981 struct btrfs_root *orig_root,
4982 u64 num_bytes, u64 empty_size,
4983 u64 search_start, u64 search_end,
4984 u64 hint_byte, struct btrfs_key *ins,
98ed5174 4985 int data)
fec577fb 4986{
80eb234a 4987 int ret = 0;
d397712b 4988 struct btrfs_root *root = orig_root->fs_info->extent_root;
fa9c0d79 4989 struct btrfs_free_cluster *last_ptr = NULL;
80eb234a 4990 struct btrfs_block_group_cache *block_group = NULL;
239b14b3 4991 int empty_cluster = 2 * 1024 * 1024;
0ef3e66b 4992 int allowed_chunk_alloc = 0;
ccf0e725 4993 int done_chunk_alloc = 0;
80eb234a 4994 struct btrfs_space_info *space_info;
fa9c0d79
CM
4995 int last_ptr_loop = 0;
4996 int loop = 0;
f0486c68 4997 int index = 0;
817d52f8 4998 bool found_uncached_bg = false;
0a24325e 4999 bool failed_cluster_refill = false;
1cdda9b8 5000 bool failed_alloc = false;
67377734 5001 bool use_cluster = true;
ccf0e725
JB
5002 u64 ideal_cache_percent = 0;
5003 u64 ideal_cache_offset = 0;
fec577fb 5004
db94535d 5005 WARN_ON(num_bytes < root->sectorsize);
b1a4d965 5006 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
80eb234a
JB
5007 ins->objectid = 0;
5008 ins->offset = 0;
b1a4d965 5009
2552d17e 5010 space_info = __find_space_info(root->fs_info, data);
1b1d1f66
JB
5011 if (!space_info) {
5012 printk(KERN_ERR "No space info for %d\n", data);
5013 return -ENOSPC;
5014 }
2552d17e 5015
67377734
JB
5016 /*
5017 * If the space info is for both data and metadata it means we have a
5018 * small filesystem and we can't use the clustering stuff.
5019 */
5020 if (btrfs_mixed_space_info(space_info))
5021 use_cluster = false;
5022
0ef3e66b
CM
5023 if (orig_root->ref_cows || empty_size)
5024 allowed_chunk_alloc = 1;
5025
67377734 5026 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
fa9c0d79 5027 last_ptr = &root->fs_info->meta_alloc_cluster;
536ac8ae
CM
5028 if (!btrfs_test_opt(root, SSD))
5029 empty_cluster = 64 * 1024;
239b14b3
CM
5030 }
5031
67377734
JB
5032 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5033 btrfs_test_opt(root, SSD)) {
fa9c0d79
CM
5034 last_ptr = &root->fs_info->data_alloc_cluster;
5035 }
0f9dd46c 5036
239b14b3 5037 if (last_ptr) {
fa9c0d79
CM
5038 spin_lock(&last_ptr->lock);
5039 if (last_ptr->block_group)
5040 hint_byte = last_ptr->window_start;
5041 spin_unlock(&last_ptr->lock);
239b14b3 5042 }
fa9c0d79 5043
a061fc8d 5044 search_start = max(search_start, first_logical_byte(root, 0));
239b14b3 5045 search_start = max(search_start, hint_byte);
0b86a832 5046
817d52f8 5047 if (!last_ptr)
fa9c0d79 5048 empty_cluster = 0;
fa9c0d79 5049
2552d17e 5050 if (search_start == hint_byte) {
ccf0e725 5051ideal_cache:
2552d17e
JB
5052 block_group = btrfs_lookup_block_group(root->fs_info,
5053 search_start);
817d52f8
JB
5054 /*
5055 * we don't want to use the block group if it doesn't match our
5056 * allocation bits, or if its not cached.
ccf0e725
JB
5057 *
5058 * However if we are re-searching with an ideal block group
5059 * picked out then we don't care that the block group is cached.
817d52f8
JB
5060 */
5061 if (block_group && block_group_bits(block_group, data) &&
ccf0e725
JB
5062 (block_group->cached != BTRFS_CACHE_NO ||
5063 search_start == ideal_cache_offset)) {
2552d17e 5064 down_read(&space_info->groups_sem);
44fb5511
CM
5065 if (list_empty(&block_group->list) ||
5066 block_group->ro) {
5067 /*
5068 * someone is removing this block group,
5069 * we can't jump into the have_block_group
5070 * target because our list pointers are not
5071 * valid
5072 */
5073 btrfs_put_block_group(block_group);
5074 up_read(&space_info->groups_sem);
ccf0e725 5075 } else {
b742bb82 5076 index = get_block_group_index(block_group);
44fb5511 5077 goto have_block_group;
ccf0e725 5078 }
2552d17e 5079 } else if (block_group) {
fa9c0d79 5080 btrfs_put_block_group(block_group);
2552d17e 5081 }
42e70e7a 5082 }
2552d17e 5083search:
80eb234a 5084 down_read(&space_info->groups_sem);
b742bb82
YZ
5085 list_for_each_entry(block_group, &space_info->block_groups[index],
5086 list) {
6226cb0a 5087 u64 offset;
817d52f8 5088 int cached;
8a1413a2 5089
11dfe35a 5090 btrfs_get_block_group(block_group);
2552d17e 5091 search_start = block_group->key.objectid;
42e70e7a 5092
83a50de9
CM
5093 /*
5094 * this can happen if we end up cycling through all the
5095 * raid types, but we want to make sure we only allocate
5096 * for the proper type.
5097 */
5098 if (!block_group_bits(block_group, data)) {
5099 u64 extra = BTRFS_BLOCK_GROUP_DUP |
5100 BTRFS_BLOCK_GROUP_RAID1 |
5101 BTRFS_BLOCK_GROUP_RAID10;
5102
5103 /*
5104 * if they asked for extra copies and this block group
5105 * doesn't provide them, bail. This does allow us to
5106 * fill raid0 from raid1.
5107 */
5108 if ((data & extra) && !(block_group->flags & extra))
5109 goto loop;
5110 }
5111
2552d17e 5112have_block_group:
817d52f8 5113 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
ccf0e725
JB
5114 u64 free_percent;
5115
b8399dee
JB
5116 ret = cache_block_group(block_group, trans,
5117 orig_root, 1);
9d66e233
JB
5118 if (block_group->cached == BTRFS_CACHE_FINISHED)
5119 goto have_block_group;
5120
ccf0e725
JB
5121 free_percent = btrfs_block_group_used(&block_group->item);
5122 free_percent *= 100;
5123 free_percent = div64_u64(free_percent,
5124 block_group->key.offset);
5125 free_percent = 100 - free_percent;
5126 if (free_percent > ideal_cache_percent &&
5127 likely(!block_group->ro)) {
5128 ideal_cache_offset = block_group->key.objectid;
5129 ideal_cache_percent = free_percent;
5130 }
5131
817d52f8 5132 /*
ccf0e725
JB
5133 * We only want to start kthread caching if we are at
5134 * the point where we will wait for caching to make
5135 * progress, or if our ideal search is over and we've
5136 * found somebody to start caching.
817d52f8
JB
5137 */
5138 if (loop > LOOP_CACHING_NOWAIT ||
ccf0e725
JB
5139 (loop > LOOP_FIND_IDEAL &&
5140 atomic_read(&space_info->caching_threads) < 2)) {
b8399dee
JB
5141 ret = cache_block_group(block_group, trans,
5142 orig_root, 0);
817d52f8 5143 BUG_ON(ret);
2552d17e 5144 }
817d52f8
JB
5145 found_uncached_bg = true;
5146
ccf0e725
JB
5147 /*
5148 * If loop is set for cached only, try the next block
5149 * group.
5150 */
5151 if (loop == LOOP_FIND_IDEAL)
817d52f8
JB
5152 goto loop;
5153 }
5154
ccf0e725
JB
5155 cached = block_group_cache_done(block_group);
5156 if (unlikely(!cached))
5157 found_uncached_bg = true;
5158
ea6a478e 5159 if (unlikely(block_group->ro))
2552d17e 5160 goto loop;
0f9dd46c 5161
0a24325e
JB
5162 /*
5163 * Ok we want to try and use the cluster allocator, so lets look
5164 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
5165 * have tried the cluster allocator plenty of times at this
5166 * point and not have found anything, so we are likely way too
5167 * fragmented for the clustering stuff to find anything, so lets
5168 * just skip it and let the allocator find whatever block it can
5169 * find
5170 */
5171 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79
CM
5172 /*
5173 * the refill lock keeps out other
5174 * people trying to start a new cluster
5175 */
5176 spin_lock(&last_ptr->refill_lock);
44fb5511
CM
5177 if (last_ptr->block_group &&
5178 (last_ptr->block_group->ro ||
5179 !block_group_bits(last_ptr->block_group, data))) {
5180 offset = 0;
5181 goto refill_cluster;
5182 }
5183
fa9c0d79
CM
5184 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5185 num_bytes, search_start);
5186 if (offset) {
5187 /* we have a block, we're done */
5188 spin_unlock(&last_ptr->refill_lock);
5189 goto checks;
5190 }
5191
5192 spin_lock(&last_ptr->lock);
5193 /*
5194 * whoops, this cluster doesn't actually point to
5195 * this block group. Get a ref on the block
5196 * group is does point to and try again
5197 */
5198 if (!last_ptr_loop && last_ptr->block_group &&
5199 last_ptr->block_group != block_group) {
5200
5201 btrfs_put_block_group(block_group);
5202 block_group = last_ptr->block_group;
11dfe35a 5203 btrfs_get_block_group(block_group);
fa9c0d79
CM
5204 spin_unlock(&last_ptr->lock);
5205 spin_unlock(&last_ptr->refill_lock);
5206
5207 last_ptr_loop = 1;
5208 search_start = block_group->key.objectid;
44fb5511
CM
5209 /*
5210 * we know this block group is properly
5211 * in the list because
5212 * btrfs_remove_block_group, drops the
5213 * cluster before it removes the block
5214 * group from the list
5215 */
fa9c0d79
CM
5216 goto have_block_group;
5217 }
5218 spin_unlock(&last_ptr->lock);
44fb5511 5219refill_cluster:
fa9c0d79
CM
5220 /*
5221 * this cluster didn't work out, free it and
5222 * start over
5223 */
5224 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5225
5226 last_ptr_loop = 0;
5227
5228 /* allocate a cluster in this block group */
451d7585 5229 ret = btrfs_find_space_cluster(trans, root,
fa9c0d79
CM
5230 block_group, last_ptr,
5231 offset, num_bytes,
5232 empty_cluster + empty_size);
5233 if (ret == 0) {
5234 /*
5235 * now pull our allocation out of this
5236 * cluster
5237 */
5238 offset = btrfs_alloc_from_cluster(block_group,
5239 last_ptr, num_bytes,
5240 search_start);
5241 if (offset) {
5242 /* we found one, proceed */
5243 spin_unlock(&last_ptr->refill_lock);
5244 goto checks;
5245 }
0a24325e
JB
5246 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5247 && !failed_cluster_refill) {
817d52f8
JB
5248 spin_unlock(&last_ptr->refill_lock);
5249
0a24325e 5250 failed_cluster_refill = true;
817d52f8
JB
5251 wait_block_group_cache_progress(block_group,
5252 num_bytes + empty_cluster + empty_size);
5253 goto have_block_group;
fa9c0d79 5254 }
817d52f8 5255
fa9c0d79
CM
5256 /*
5257 * at this point we either didn't find a cluster
5258 * or we weren't able to allocate a block from our
5259 * cluster. Free the cluster we've been trying
5260 * to use, and go to the next block group
5261 */
0a24325e 5262 btrfs_return_cluster_to_free_space(NULL, last_ptr);
fa9c0d79 5263 spin_unlock(&last_ptr->refill_lock);
0a24325e 5264 goto loop;
fa9c0d79
CM
5265 }
5266
6226cb0a
JB
5267 offset = btrfs_find_space_for_alloc(block_group, search_start,
5268 num_bytes, empty_size);
1cdda9b8
JB
5269 /*
5270 * If we didn't find a chunk, and we haven't failed on this
5271 * block group before, and this block group is in the middle of
5272 * caching and we are ok with waiting, then go ahead and wait
5273 * for progress to be made, and set failed_alloc to true.
5274 *
5275 * If failed_alloc is true then we've already waited on this
5276 * block group once and should move on to the next block group.
5277 */
5278 if (!offset && !failed_alloc && !cached &&
5279 loop > LOOP_CACHING_NOWAIT) {
817d52f8 5280 wait_block_group_cache_progress(block_group,
1cdda9b8
JB
5281 num_bytes + empty_size);
5282 failed_alloc = true;
817d52f8 5283 goto have_block_group;
1cdda9b8
JB
5284 } else if (!offset) {
5285 goto loop;
817d52f8 5286 }
fa9c0d79 5287checks:
6226cb0a 5288 search_start = stripe_align(root, offset);
2552d17e 5289 /* move on to the next group */
6226cb0a
JB
5290 if (search_start + num_bytes >= search_end) {
5291 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 5292 goto loop;
6226cb0a 5293 }
25179201 5294
2552d17e
JB
5295 /* move on to the next group */
5296 if (search_start + num_bytes >
6226cb0a
JB
5297 block_group->key.objectid + block_group->key.offset) {
5298 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 5299 goto loop;
6226cb0a 5300 }
f5a31e16 5301
f0486c68
YZ
5302 ins->objectid = search_start;
5303 ins->offset = num_bytes;
2552d17e 5304
f0486c68
YZ
5305 if (offset < search_start)
5306 btrfs_add_free_space(block_group, offset,
5307 search_start - offset);
5308 BUG_ON(offset > search_start);
2552d17e 5309
b4d00d56 5310 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
f0486c68
YZ
5311 (data & BTRFS_BLOCK_GROUP_DATA));
5312 if (ret == -EAGAIN) {
6226cb0a 5313 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 5314 goto loop;
0f9dd46c 5315 }
0b86a832 5316
f0486c68 5317 /* we are all good, lets return */
2552d17e
JB
5318 ins->objectid = search_start;
5319 ins->offset = num_bytes;
d2fb3437 5320
6226cb0a
JB
5321 if (offset < search_start)
5322 btrfs_add_free_space(block_group, offset,
5323 search_start - offset);
5324 BUG_ON(offset > search_start);
d82a6f1d 5325 btrfs_put_block_group(block_group);
2552d17e
JB
5326 break;
5327loop:
0a24325e 5328 failed_cluster_refill = false;
1cdda9b8 5329 failed_alloc = false;
b742bb82 5330 BUG_ON(index != get_block_group_index(block_group));
fa9c0d79 5331 btrfs_put_block_group(block_group);
2552d17e
JB
5332 }
5333 up_read(&space_info->groups_sem);
5334
b742bb82
YZ
5335 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5336 goto search;
5337
ccf0e725
JB
5338 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
5339 * for them to make caching progress. Also
5340 * determine the best possible bg to cache
5341 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5342 * caching kthreads as we move along
817d52f8
JB
5343 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5344 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5345 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5346 * again
fa9c0d79 5347 */
817d52f8
JB
5348 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5349 (found_uncached_bg || empty_size || empty_cluster ||
5350 allowed_chunk_alloc)) {
b742bb82 5351 index = 0;
ccf0e725 5352 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
817d52f8 5353 found_uncached_bg = false;
ccf0e725
JB
5354 loop++;
5355 if (!ideal_cache_percent &&
5356 atomic_read(&space_info->caching_threads))
817d52f8 5357 goto search;
ccf0e725
JB
5358
5359 /*
5360 * 1 of the following 2 things have happened so far
5361 *
5362 * 1) We found an ideal block group for caching that
5363 * is mostly full and will cache quickly, so we might
5364 * as well wait for it.
5365 *
5366 * 2) We searched for cached only and we didn't find
5367 * anything, and we didn't start any caching kthreads
5368 * either, so chances are we will loop through and
5369 * start a couple caching kthreads, and then come back
5370 * around and just wait for them. This will be slower
5371 * because we will have 2 caching kthreads reading at
5372 * the same time when we could have just started one
5373 * and waited for it to get far enough to give us an
5374 * allocation, so go ahead and go to the wait caching
5375 * loop.
5376 */
5377 loop = LOOP_CACHING_WAIT;
5378 search_start = ideal_cache_offset;
5379 ideal_cache_percent = 0;
5380 goto ideal_cache;
5381 } else if (loop == LOOP_FIND_IDEAL) {
5382 /*
5383 * Didn't find a uncached bg, wait on anything we find
5384 * next.
5385 */
5386 loop = LOOP_CACHING_WAIT;
5387 goto search;
5388 }
5389
5390 if (loop < LOOP_CACHING_WAIT) {
5391 loop++;
5392 goto search;
817d52f8
JB
5393 }
5394
5395 if (loop == LOOP_ALLOC_CHUNK) {
fa9c0d79
CM
5396 empty_size = 0;
5397 empty_cluster = 0;
5398 }
2552d17e
JB
5399
5400 if (allowed_chunk_alloc) {
5401 ret = do_chunk_alloc(trans, root, num_bytes +
0e4f8f88
CM
5402 2 * 1024 * 1024, data,
5403 CHUNK_ALLOC_LIMITED);
2552d17e 5404 allowed_chunk_alloc = 0;
ccf0e725 5405 done_chunk_alloc = 1;
0e4f8f88
CM
5406 } else if (!done_chunk_alloc &&
5407 space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
5408 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
2552d17e
JB
5409 }
5410
817d52f8 5411 if (loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79 5412 loop++;
2552d17e 5413 goto search;
fa9c0d79 5414 }
2552d17e
JB
5415 ret = -ENOSPC;
5416 } else if (!ins->objectid) {
5417 ret = -ENOSPC;
d82a6f1d 5418 } else if (ins->objectid) {
80eb234a 5419 ret = 0;
be744175 5420 }
be744175 5421
0f70abe2 5422 return ret;
fec577fb 5423}
ec44a35c 5424
9ed74f2d
JB
5425static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5426 int dump_block_groups)
0f9dd46c
JB
5427{
5428 struct btrfs_block_group_cache *cache;
b742bb82 5429 int index = 0;
0f9dd46c 5430
9ed74f2d 5431 spin_lock(&info->lock);
d397712b
CM
5432 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5433 (unsigned long long)(info->total_bytes - info->bytes_used -
9ed74f2d 5434 info->bytes_pinned - info->bytes_reserved -
8929ecfa 5435 info->bytes_readonly),
d397712b 5436 (info->full) ? "" : "not ");
8929ecfa
YZ
5437 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5438 "reserved=%llu, may_use=%llu, readonly=%llu\n",
21380931 5439 (unsigned long long)info->total_bytes,
8929ecfa 5440 (unsigned long long)info->bytes_used,
21380931 5441 (unsigned long long)info->bytes_pinned,
8929ecfa 5442 (unsigned long long)info->bytes_reserved,
21380931 5443 (unsigned long long)info->bytes_may_use,
8929ecfa 5444 (unsigned long long)info->bytes_readonly);
9ed74f2d
JB
5445 spin_unlock(&info->lock);
5446
5447 if (!dump_block_groups)
5448 return;
0f9dd46c 5449
80eb234a 5450 down_read(&info->groups_sem);
b742bb82
YZ
5451again:
5452 list_for_each_entry(cache, &info->block_groups[index], list) {
0f9dd46c 5453 spin_lock(&cache->lock);
d397712b
CM
5454 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5455 "%llu pinned %llu reserved\n",
5456 (unsigned long long)cache->key.objectid,
5457 (unsigned long long)cache->key.offset,
5458 (unsigned long long)btrfs_block_group_used(&cache->item),
5459 (unsigned long long)cache->pinned,
5460 (unsigned long long)cache->reserved);
0f9dd46c
JB
5461 btrfs_dump_free_space(cache, bytes);
5462 spin_unlock(&cache->lock);
5463 }
b742bb82
YZ
5464 if (++index < BTRFS_NR_RAID_TYPES)
5465 goto again;
80eb234a 5466 up_read(&info->groups_sem);
0f9dd46c 5467}
e8569813 5468
11833d66
YZ
5469int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5470 struct btrfs_root *root,
5471 u64 num_bytes, u64 min_alloc_size,
5472 u64 empty_size, u64 hint_byte,
5473 u64 search_end, struct btrfs_key *ins,
5474 u64 data)
fec577fb
CM
5475{
5476 int ret;
fbdc762b 5477 u64 search_start = 0;
925baedd 5478
6a63209f 5479 data = btrfs_get_alloc_profile(root, data);
98d20f67 5480again:
0ef3e66b
CM
5481 /*
5482 * the only place that sets empty_size is btrfs_realloc_node, which
5483 * is not called recursively on allocations
5484 */
83d3c969 5485 if (empty_size || root->ref_cows)
6324fbf3 5486 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
0e4f8f88
CM
5487 num_bytes + 2 * 1024 * 1024, data,
5488 CHUNK_ALLOC_NO_FORCE);
0b86a832 5489
db94535d
CM
5490 WARN_ON(num_bytes < root->sectorsize);
5491 ret = find_free_extent(trans, root, num_bytes, empty_size,
f0486c68
YZ
5492 search_start, search_end, hint_byte,
5493 ins, data);
3b951516 5494
98d20f67
CM
5495 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5496 num_bytes = num_bytes >> 1;
0f9dd46c 5497 num_bytes = num_bytes & ~(root->sectorsize - 1);
98d20f67 5498 num_bytes = max(num_bytes, min_alloc_size);
0ef3e66b 5499 do_chunk_alloc(trans, root->fs_info->extent_root,
0e4f8f88 5500 num_bytes, data, CHUNK_ALLOC_FORCE);
98d20f67
CM
5501 goto again;
5502 }
91435650 5503 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
0f9dd46c
JB
5504 struct btrfs_space_info *sinfo;
5505
5506 sinfo = __find_space_info(root->fs_info, data);
d397712b
CM
5507 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5508 "wanted %llu\n", (unsigned long long)data,
5509 (unsigned long long)num_bytes);
9ed74f2d 5510 dump_space_info(sinfo, num_bytes, 1);
925baedd 5511 }
0f9dd46c 5512
1abe9b8a 5513 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5514
0f9dd46c 5515 return ret;
e6dcd2dc
CM
5516}
5517
65b51a00
CM
5518int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5519{
0f9dd46c 5520 struct btrfs_block_group_cache *cache;
1f3c79a2 5521 int ret = 0;
0f9dd46c 5522
0f9dd46c
JB
5523 cache = btrfs_lookup_block_group(root->fs_info, start);
5524 if (!cache) {
d397712b
CM
5525 printk(KERN_ERR "Unable to find block group for %llu\n",
5526 (unsigned long long)start);
0f9dd46c
JB
5527 return -ENOSPC;
5528 }
1f3c79a2 5529
5378e607
LD
5530 if (btrfs_test_opt(root, DISCARD))
5531 ret = btrfs_discard_extent(root, start, len, NULL);
1f3c79a2 5532
0f9dd46c 5533 btrfs_add_free_space(cache, start, len);
b4d00d56 5534 btrfs_update_reserved_bytes(cache, len, 0, 1);
fa9c0d79 5535 btrfs_put_block_group(cache);
817d52f8 5536
1abe9b8a 5537 trace_btrfs_reserved_extent_free(root, start, len);
5538
e6dcd2dc
CM
5539 return ret;
5540}
5541
5d4f98a2
YZ
5542static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5543 struct btrfs_root *root,
5544 u64 parent, u64 root_objectid,
5545 u64 flags, u64 owner, u64 offset,
5546 struct btrfs_key *ins, int ref_mod)
e6dcd2dc
CM
5547{
5548 int ret;
5d4f98a2 5549 struct btrfs_fs_info *fs_info = root->fs_info;
e6dcd2dc 5550 struct btrfs_extent_item *extent_item;
5d4f98a2 5551 struct btrfs_extent_inline_ref *iref;
e6dcd2dc 5552 struct btrfs_path *path;
5d4f98a2
YZ
5553 struct extent_buffer *leaf;
5554 int type;
5555 u32 size;
26b8003f 5556
5d4f98a2
YZ
5557 if (parent > 0)
5558 type = BTRFS_SHARED_DATA_REF_KEY;
5559 else
5560 type = BTRFS_EXTENT_DATA_REF_KEY;
58176a96 5561
5d4f98a2 5562 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7bb86316
CM
5563
5564 path = btrfs_alloc_path();
db5b493a
TI
5565 if (!path)
5566 return -ENOMEM;
47e4bb98 5567
b9473439 5568 path->leave_spinning = 1;
5d4f98a2
YZ
5569 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5570 ins, size);
ccd467d6 5571 BUG_ON(ret);
0f9dd46c 5572
5d4f98a2
YZ
5573 leaf = path->nodes[0];
5574 extent_item = btrfs_item_ptr(leaf, path->slots[0],
47e4bb98 5575 struct btrfs_extent_item);
5d4f98a2
YZ
5576 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5577 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5578 btrfs_set_extent_flags(leaf, extent_item,
5579 flags | BTRFS_EXTENT_FLAG_DATA);
5580
5581 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5582 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5583 if (parent > 0) {
5584 struct btrfs_shared_data_ref *ref;
5585 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5586 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5587 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5588 } else {
5589 struct btrfs_extent_data_ref *ref;
5590 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5591 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5592 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5593 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5594 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5595 }
47e4bb98
CM
5596
5597 btrfs_mark_buffer_dirty(path->nodes[0]);
7bb86316 5598 btrfs_free_path(path);
f510cfec 5599
f0486c68 5600 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
f5947066 5601 if (ret) {
d397712b
CM
5602 printk(KERN_ERR "btrfs update block group failed for %llu "
5603 "%llu\n", (unsigned long long)ins->objectid,
5604 (unsigned long long)ins->offset);
f5947066
CM
5605 BUG();
5606 }
e6dcd2dc
CM
5607 return ret;
5608}
5609
5d4f98a2
YZ
5610static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5611 struct btrfs_root *root,
5612 u64 parent, u64 root_objectid,
5613 u64 flags, struct btrfs_disk_key *key,
5614 int level, struct btrfs_key *ins)
e6dcd2dc
CM
5615{
5616 int ret;
5d4f98a2
YZ
5617 struct btrfs_fs_info *fs_info = root->fs_info;
5618 struct btrfs_extent_item *extent_item;
5619 struct btrfs_tree_block_info *block_info;
5620 struct btrfs_extent_inline_ref *iref;
5621 struct btrfs_path *path;
5622 struct extent_buffer *leaf;
5623 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
1c2308f8 5624
5d4f98a2
YZ
5625 path = btrfs_alloc_path();
5626 BUG_ON(!path);
56bec294 5627
5d4f98a2
YZ
5628 path->leave_spinning = 1;
5629 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5630 ins, size);
56bec294 5631 BUG_ON(ret);
5d4f98a2
YZ
5632
5633 leaf = path->nodes[0];
5634 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5635 struct btrfs_extent_item);
5636 btrfs_set_extent_refs(leaf, extent_item, 1);
5637 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5638 btrfs_set_extent_flags(leaf, extent_item,
5639 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5640 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5641
5642 btrfs_set_tree_block_key(leaf, block_info, key);
5643 btrfs_set_tree_block_level(leaf, block_info, level);
5644
5645 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5646 if (parent > 0) {
5647 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5648 btrfs_set_extent_inline_ref_type(leaf, iref,
5649 BTRFS_SHARED_BLOCK_REF_KEY);
5650 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5651 } else {
5652 btrfs_set_extent_inline_ref_type(leaf, iref,
5653 BTRFS_TREE_BLOCK_REF_KEY);
5654 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5655 }
5656
5657 btrfs_mark_buffer_dirty(leaf);
5658 btrfs_free_path(path);
5659
f0486c68 5660 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5d4f98a2
YZ
5661 if (ret) {
5662 printk(KERN_ERR "btrfs update block group failed for %llu "
5663 "%llu\n", (unsigned long long)ins->objectid,
5664 (unsigned long long)ins->offset);
5665 BUG();
5666 }
5667 return ret;
5668}
5669
5670int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5671 struct btrfs_root *root,
5672 u64 root_objectid, u64 owner,
5673 u64 offset, struct btrfs_key *ins)
5674{
5675 int ret;
5676
5677 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5678
5679 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5680 0, root_objectid, owner, offset,
5681 BTRFS_ADD_DELAYED_EXTENT, NULL);
e6dcd2dc
CM
5682 return ret;
5683}
e02119d5
CM
5684
5685/*
5686 * this is used by the tree logging recovery code. It records that
5687 * an extent has been allocated and makes sure to clear the free
5688 * space cache bits as well
5689 */
5d4f98a2
YZ
5690int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5691 struct btrfs_root *root,
5692 u64 root_objectid, u64 owner, u64 offset,
5693 struct btrfs_key *ins)
e02119d5
CM
5694{
5695 int ret;
5696 struct btrfs_block_group_cache *block_group;
11833d66
YZ
5697 struct btrfs_caching_control *caching_ctl;
5698 u64 start = ins->objectid;
5699 u64 num_bytes = ins->offset;
e02119d5 5700
e02119d5 5701 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
b8399dee 5702 cache_block_group(block_group, trans, NULL, 0);
11833d66 5703 caching_ctl = get_caching_control(block_group);
e02119d5 5704
11833d66
YZ
5705 if (!caching_ctl) {
5706 BUG_ON(!block_group_cache_done(block_group));
5707 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5708 BUG_ON(ret);
5709 } else {
5710 mutex_lock(&caching_ctl->mutex);
5711
5712 if (start >= caching_ctl->progress) {
5713 ret = add_excluded_extent(root, start, num_bytes);
5714 BUG_ON(ret);
5715 } else if (start + num_bytes <= caching_ctl->progress) {
5716 ret = btrfs_remove_free_space(block_group,
5717 start, num_bytes);
5718 BUG_ON(ret);
5719 } else {
5720 num_bytes = caching_ctl->progress - start;
5721 ret = btrfs_remove_free_space(block_group,
5722 start, num_bytes);
5723 BUG_ON(ret);
5724
5725 start = caching_ctl->progress;
5726 num_bytes = ins->objectid + ins->offset -
5727 caching_ctl->progress;
5728 ret = add_excluded_extent(root, start, num_bytes);
5729 BUG_ON(ret);
5730 }
5731
5732 mutex_unlock(&caching_ctl->mutex);
5733 put_caching_control(caching_ctl);
5734 }
5735
b4d00d56 5736 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
f0486c68 5737 BUG_ON(ret);
fa9c0d79 5738 btrfs_put_block_group(block_group);
5d4f98a2
YZ
5739 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5740 0, owner, offset, ins, 1);
e02119d5
CM
5741 return ret;
5742}
5743
65b51a00
CM
5744struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5745 struct btrfs_root *root,
4008c04a
CM
5746 u64 bytenr, u32 blocksize,
5747 int level)
65b51a00
CM
5748{
5749 struct extent_buffer *buf;
5750
5751 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5752 if (!buf)
5753 return ERR_PTR(-ENOMEM);
5754 btrfs_set_header_generation(buf, trans->transid);
4008c04a 5755 btrfs_set_buffer_lockdep_class(buf, level);
65b51a00
CM
5756 btrfs_tree_lock(buf);
5757 clean_tree_block(trans, root, buf);
b4ce94de
CM
5758
5759 btrfs_set_lock_blocking(buf);
65b51a00 5760 btrfs_set_buffer_uptodate(buf);
b4ce94de 5761
d0c803c4 5762 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8cef4e16
YZ
5763 /*
5764 * we allow two log transactions at a time, use different
5765 * EXENT bit to differentiate dirty pages.
5766 */
5767 if (root->log_transid % 2 == 0)
5768 set_extent_dirty(&root->dirty_log_pages, buf->start,
5769 buf->start + buf->len - 1, GFP_NOFS);
5770 else
5771 set_extent_new(&root->dirty_log_pages, buf->start,
5772 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4
CM
5773 } else {
5774 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
65b51a00 5775 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4 5776 }
65b51a00 5777 trans->blocks_used++;
b4ce94de 5778 /* this returns a buffer locked for blocking */
65b51a00
CM
5779 return buf;
5780}
5781
f0486c68
YZ
5782static struct btrfs_block_rsv *
5783use_block_rsv(struct btrfs_trans_handle *trans,
5784 struct btrfs_root *root, u32 blocksize)
5785{
5786 struct btrfs_block_rsv *block_rsv;
68a82277 5787 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
f0486c68
YZ
5788 int ret;
5789
5790 block_rsv = get_block_rsv(trans, root);
5791
5792 if (block_rsv->size == 0) {
8bb8ab2e
JB
5793 ret = reserve_metadata_bytes(trans, root, block_rsv,
5794 blocksize, 0);
68a82277
JB
5795 /*
5796 * If we couldn't reserve metadata bytes try and use some from
5797 * the global reserve.
5798 */
5799 if (ret && block_rsv != global_rsv) {
5800 ret = block_rsv_use_bytes(global_rsv, blocksize);
5801 if (!ret)
5802 return global_rsv;
f0486c68 5803 return ERR_PTR(ret);
68a82277 5804 } else if (ret) {
f0486c68 5805 return ERR_PTR(ret);
68a82277 5806 }
f0486c68
YZ
5807 return block_rsv;
5808 }
5809
5810 ret = block_rsv_use_bytes(block_rsv, blocksize);
5811 if (!ret)
5812 return block_rsv;
68a82277
JB
5813 if (ret) {
5814 WARN_ON(1);
5815 ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
5816 0);
5817 if (!ret) {
5818 spin_lock(&block_rsv->lock);
5819 block_rsv->size += blocksize;
5820 spin_unlock(&block_rsv->lock);
5821 return block_rsv;
5822 } else if (ret && block_rsv != global_rsv) {
5823 ret = block_rsv_use_bytes(global_rsv, blocksize);
5824 if (!ret)
5825 return global_rsv;
5826 }
5827 }
f0486c68 5828
f0486c68
YZ
5829 return ERR_PTR(-ENOSPC);
5830}
5831
5832static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5833{
5834 block_rsv_add_bytes(block_rsv, blocksize, 0);
5835 block_rsv_release_bytes(block_rsv, NULL, 0);
5836}
5837
fec577fb 5838/*
f0486c68
YZ
5839 * finds a free extent and does all the dirty work required for allocation
5840 * returns the key for the extent through ins, and a tree buffer for
5841 * the first block of the extent through buf.
5842 *
fec577fb
CM
5843 * returns the tree buffer or NULL.
5844 */
5f39d397 5845struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5d4f98a2
YZ
5846 struct btrfs_root *root, u32 blocksize,
5847 u64 parent, u64 root_objectid,
5848 struct btrfs_disk_key *key, int level,
5849 u64 hint, u64 empty_size)
fec577fb 5850{
e2fa7227 5851 struct btrfs_key ins;
f0486c68 5852 struct btrfs_block_rsv *block_rsv;
5f39d397 5853 struct extent_buffer *buf;
f0486c68
YZ
5854 u64 flags = 0;
5855 int ret;
5856
fec577fb 5857
f0486c68
YZ
5858 block_rsv = use_block_rsv(trans, root, blocksize);
5859 if (IS_ERR(block_rsv))
5860 return ERR_CAST(block_rsv);
5861
5862 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5863 empty_size, hint, (u64)-1, &ins, 0);
fec577fb 5864 if (ret) {
f0486c68 5865 unuse_block_rsv(block_rsv, blocksize);
54aa1f4d 5866 return ERR_PTR(ret);
fec577fb 5867 }
55c69072 5868
4008c04a
CM
5869 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5870 blocksize, level);
f0486c68
YZ
5871 BUG_ON(IS_ERR(buf));
5872
5873 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5874 if (parent == 0)
5875 parent = ins.objectid;
5876 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5877 } else
5878 BUG_ON(parent > 0);
5879
5880 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5881 struct btrfs_delayed_extent_op *extent_op;
5882 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5883 BUG_ON(!extent_op);
5884 if (key)
5885 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5886 else
5887 memset(&extent_op->key, 0, sizeof(extent_op->key));
5888 extent_op->flags_to_set = flags;
5889 extent_op->update_key = 1;
5890 extent_op->update_flags = 1;
5891 extent_op->is_data = 0;
5892
5893 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5894 ins.offset, parent, root_objectid,
5895 level, BTRFS_ADD_DELAYED_EXTENT,
5896 extent_op);
5897 BUG_ON(ret);
5898 }
fec577fb
CM
5899 return buf;
5900}
a28ec197 5901
2c47e605
YZ
5902struct walk_control {
5903 u64 refs[BTRFS_MAX_LEVEL];
5904 u64 flags[BTRFS_MAX_LEVEL];
5905 struct btrfs_key update_progress;
5906 int stage;
5907 int level;
5908 int shared_level;
5909 int update_ref;
5910 int keep_locks;
1c4850e2
YZ
5911 int reada_slot;
5912 int reada_count;
2c47e605
YZ
5913};
5914
5915#define DROP_REFERENCE 1
5916#define UPDATE_BACKREF 2
5917
1c4850e2
YZ
5918static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5919 struct btrfs_root *root,
5920 struct walk_control *wc,
5921 struct btrfs_path *path)
6407bf6d 5922{
1c4850e2
YZ
5923 u64 bytenr;
5924 u64 generation;
5925 u64 refs;
94fcca9f 5926 u64 flags;
5d4f98a2 5927 u32 nritems;
1c4850e2
YZ
5928 u32 blocksize;
5929 struct btrfs_key key;
5930 struct extent_buffer *eb;
6407bf6d 5931 int ret;
1c4850e2
YZ
5932 int slot;
5933 int nread = 0;
6407bf6d 5934
1c4850e2
YZ
5935 if (path->slots[wc->level] < wc->reada_slot) {
5936 wc->reada_count = wc->reada_count * 2 / 3;
5937 wc->reada_count = max(wc->reada_count, 2);
5938 } else {
5939 wc->reada_count = wc->reada_count * 3 / 2;
5940 wc->reada_count = min_t(int, wc->reada_count,
5941 BTRFS_NODEPTRS_PER_BLOCK(root));
5942 }
7bb86316 5943
1c4850e2
YZ
5944 eb = path->nodes[wc->level];
5945 nritems = btrfs_header_nritems(eb);
5946 blocksize = btrfs_level_size(root, wc->level - 1);
bd56b302 5947
1c4850e2
YZ
5948 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5949 if (nread >= wc->reada_count)
5950 break;
bd56b302 5951
2dd3e67b 5952 cond_resched();
1c4850e2
YZ
5953 bytenr = btrfs_node_blockptr(eb, slot);
5954 generation = btrfs_node_ptr_generation(eb, slot);
2dd3e67b 5955
1c4850e2
YZ
5956 if (slot == path->slots[wc->level])
5957 goto reada;
5d4f98a2 5958
1c4850e2
YZ
5959 if (wc->stage == UPDATE_BACKREF &&
5960 generation <= root->root_key.offset)
bd56b302
CM
5961 continue;
5962
94fcca9f
YZ
5963 /* We don't lock the tree block, it's OK to be racy here */
5964 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5965 &refs, &flags);
5966 BUG_ON(ret);
5967 BUG_ON(refs == 0);
5968
1c4850e2 5969 if (wc->stage == DROP_REFERENCE) {
1c4850e2
YZ
5970 if (refs == 1)
5971 goto reada;
bd56b302 5972
94fcca9f
YZ
5973 if (wc->level == 1 &&
5974 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5975 continue;
1c4850e2
YZ
5976 if (!wc->update_ref ||
5977 generation <= root->root_key.offset)
5978 continue;
5979 btrfs_node_key_to_cpu(eb, &key, slot);
5980 ret = btrfs_comp_cpu_keys(&key,
5981 &wc->update_progress);
5982 if (ret < 0)
5983 continue;
94fcca9f
YZ
5984 } else {
5985 if (wc->level == 1 &&
5986 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5987 continue;
6407bf6d 5988 }
1c4850e2
YZ
5989reada:
5990 ret = readahead_tree_block(root, bytenr, blocksize,
5991 generation);
5992 if (ret)
bd56b302 5993 break;
1c4850e2 5994 nread++;
20524f02 5995 }
1c4850e2 5996 wc->reada_slot = slot;
20524f02 5997}
2c47e605 5998
f82d02d9 5999/*
2c47e605
YZ
6000 * hepler to process tree block while walking down the tree.
6001 *
2c47e605
YZ
6002 * when wc->stage == UPDATE_BACKREF, this function updates
6003 * back refs for pointers in the block.
6004 *
6005 * NOTE: return value 1 means we should stop walking down.
f82d02d9 6006 */
2c47e605 6007static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5d4f98a2 6008 struct btrfs_root *root,
2c47e605 6009 struct btrfs_path *path,
94fcca9f 6010 struct walk_control *wc, int lookup_info)
f82d02d9 6011{
2c47e605
YZ
6012 int level = wc->level;
6013 struct extent_buffer *eb = path->nodes[level];
2c47e605 6014 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
f82d02d9
YZ
6015 int ret;
6016
2c47e605
YZ
6017 if (wc->stage == UPDATE_BACKREF &&
6018 btrfs_header_owner(eb) != root->root_key.objectid)
6019 return 1;
f82d02d9 6020
2c47e605
YZ
6021 /*
6022 * when reference count of tree block is 1, it won't increase
6023 * again. once full backref flag is set, we never clear it.
6024 */
94fcca9f
YZ
6025 if (lookup_info &&
6026 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6027 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
2c47e605
YZ
6028 BUG_ON(!path->locks[level]);
6029 ret = btrfs_lookup_extent_info(trans, root,
6030 eb->start, eb->len,
6031 &wc->refs[level],
6032 &wc->flags[level]);
6033 BUG_ON(ret);
6034 BUG_ON(wc->refs[level] == 0);
6035 }
5d4f98a2 6036
2c47e605
YZ
6037 if (wc->stage == DROP_REFERENCE) {
6038 if (wc->refs[level] > 1)
6039 return 1;
f82d02d9 6040
2c47e605
YZ
6041 if (path->locks[level] && !wc->keep_locks) {
6042 btrfs_tree_unlock(eb);
6043 path->locks[level] = 0;
6044 }
6045 return 0;
6046 }
f82d02d9 6047
2c47e605
YZ
6048 /* wc->stage == UPDATE_BACKREF */
6049 if (!(wc->flags[level] & flag)) {
6050 BUG_ON(!path->locks[level]);
6051 ret = btrfs_inc_ref(trans, root, eb, 1);
f82d02d9 6052 BUG_ON(ret);
2c47e605
YZ
6053 ret = btrfs_dec_ref(trans, root, eb, 0);
6054 BUG_ON(ret);
6055 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6056 eb->len, flag, 0);
6057 BUG_ON(ret);
6058 wc->flags[level] |= flag;
6059 }
6060
6061 /*
6062 * the block is shared by multiple trees, so it's not good to
6063 * keep the tree lock
6064 */
6065 if (path->locks[level] && level > 0) {
6066 btrfs_tree_unlock(eb);
6067 path->locks[level] = 0;
6068 }
6069 return 0;
6070}
6071
1c4850e2
YZ
6072/*
6073 * hepler to process tree block pointer.
6074 *
6075 * when wc->stage == DROP_REFERENCE, this function checks
6076 * reference count of the block pointed to. if the block
6077 * is shared and we need update back refs for the subtree
6078 * rooted at the block, this function changes wc->stage to
6079 * UPDATE_BACKREF. if the block is shared and there is no
6080 * need to update back, this function drops the reference
6081 * to the block.
6082 *
6083 * NOTE: return value 1 means we should stop walking down.
6084 */
6085static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6086 struct btrfs_root *root,
6087 struct btrfs_path *path,
94fcca9f 6088 struct walk_control *wc, int *lookup_info)
1c4850e2
YZ
6089{
6090 u64 bytenr;
6091 u64 generation;
6092 u64 parent;
6093 u32 blocksize;
6094 struct btrfs_key key;
6095 struct extent_buffer *next;
6096 int level = wc->level;
6097 int reada = 0;
6098 int ret = 0;
6099
6100 generation = btrfs_node_ptr_generation(path->nodes[level],
6101 path->slots[level]);
6102 /*
6103 * if the lower level block was created before the snapshot
6104 * was created, we know there is no need to update back refs
6105 * for the subtree
6106 */
6107 if (wc->stage == UPDATE_BACKREF &&
94fcca9f
YZ
6108 generation <= root->root_key.offset) {
6109 *lookup_info = 1;
1c4850e2 6110 return 1;
94fcca9f 6111 }
1c4850e2
YZ
6112
6113 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6114 blocksize = btrfs_level_size(root, level - 1);
6115
6116 next = btrfs_find_tree_block(root, bytenr, blocksize);
6117 if (!next) {
6118 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
90d2c51d
MX
6119 if (!next)
6120 return -ENOMEM;
1c4850e2
YZ
6121 reada = 1;
6122 }
6123 btrfs_tree_lock(next);
6124 btrfs_set_lock_blocking(next);
6125
94fcca9f
YZ
6126 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6127 &wc->refs[level - 1],
6128 &wc->flags[level - 1]);
6129 BUG_ON(ret);
6130 BUG_ON(wc->refs[level - 1] == 0);
6131 *lookup_info = 0;
1c4850e2 6132
94fcca9f 6133 if (wc->stage == DROP_REFERENCE) {
1c4850e2 6134 if (wc->refs[level - 1] > 1) {
94fcca9f
YZ
6135 if (level == 1 &&
6136 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6137 goto skip;
6138
1c4850e2
YZ
6139 if (!wc->update_ref ||
6140 generation <= root->root_key.offset)
6141 goto skip;
6142
6143 btrfs_node_key_to_cpu(path->nodes[level], &key,
6144 path->slots[level]);
6145 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6146 if (ret < 0)
6147 goto skip;
6148
6149 wc->stage = UPDATE_BACKREF;
6150 wc->shared_level = level - 1;
6151 }
94fcca9f
YZ
6152 } else {
6153 if (level == 1 &&
6154 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6155 goto skip;
1c4850e2
YZ
6156 }
6157
6158 if (!btrfs_buffer_uptodate(next, generation)) {
6159 btrfs_tree_unlock(next);
6160 free_extent_buffer(next);
6161 next = NULL;
94fcca9f 6162 *lookup_info = 1;
1c4850e2
YZ
6163 }
6164
6165 if (!next) {
6166 if (reada && level == 1)
6167 reada_walk_down(trans, root, wc, path);
6168 next = read_tree_block(root, bytenr, blocksize, generation);
97d9a8a4
TI
6169 if (!next)
6170 return -EIO;
1c4850e2
YZ
6171 btrfs_tree_lock(next);
6172 btrfs_set_lock_blocking(next);
6173 }
6174
6175 level--;
6176 BUG_ON(level != btrfs_header_level(next));
6177 path->nodes[level] = next;
6178 path->slots[level] = 0;
6179 path->locks[level] = 1;
6180 wc->level = level;
6181 if (wc->level == 1)
6182 wc->reada_slot = 0;
6183 return 0;
6184skip:
6185 wc->refs[level - 1] = 0;
6186 wc->flags[level - 1] = 0;
94fcca9f
YZ
6187 if (wc->stage == DROP_REFERENCE) {
6188 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6189 parent = path->nodes[level]->start;
6190 } else {
6191 BUG_ON(root->root_key.objectid !=
6192 btrfs_header_owner(path->nodes[level]));
6193 parent = 0;
6194 }
1c4850e2 6195
94fcca9f
YZ
6196 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6197 root->root_key.objectid, level - 1, 0);
6198 BUG_ON(ret);
1c4850e2 6199 }
1c4850e2
YZ
6200 btrfs_tree_unlock(next);
6201 free_extent_buffer(next);
94fcca9f 6202 *lookup_info = 1;
1c4850e2
YZ
6203 return 1;
6204}
6205
2c47e605
YZ
6206/*
6207 * hepler to process tree block while walking up the tree.
6208 *
6209 * when wc->stage == DROP_REFERENCE, this function drops
6210 * reference count on the block.
6211 *
6212 * when wc->stage == UPDATE_BACKREF, this function changes
6213 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6214 * to UPDATE_BACKREF previously while processing the block.
6215 *
6216 * NOTE: return value 1 means we should stop walking up.
6217 */
6218static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6219 struct btrfs_root *root,
6220 struct btrfs_path *path,
6221 struct walk_control *wc)
6222{
f0486c68 6223 int ret;
2c47e605
YZ
6224 int level = wc->level;
6225 struct extent_buffer *eb = path->nodes[level];
6226 u64 parent = 0;
6227
6228 if (wc->stage == UPDATE_BACKREF) {
6229 BUG_ON(wc->shared_level < level);
6230 if (level < wc->shared_level)
6231 goto out;
6232
2c47e605
YZ
6233 ret = find_next_key(path, level + 1, &wc->update_progress);
6234 if (ret > 0)
6235 wc->update_ref = 0;
6236
6237 wc->stage = DROP_REFERENCE;
6238 wc->shared_level = -1;
6239 path->slots[level] = 0;
6240
6241 /*
6242 * check reference count again if the block isn't locked.
6243 * we should start walking down the tree again if reference
6244 * count is one.
6245 */
6246 if (!path->locks[level]) {
6247 BUG_ON(level == 0);
6248 btrfs_tree_lock(eb);
6249 btrfs_set_lock_blocking(eb);
6250 path->locks[level] = 1;
6251
6252 ret = btrfs_lookup_extent_info(trans, root,
6253 eb->start, eb->len,
6254 &wc->refs[level],
6255 &wc->flags[level]);
f82d02d9 6256 BUG_ON(ret);
2c47e605
YZ
6257 BUG_ON(wc->refs[level] == 0);
6258 if (wc->refs[level] == 1) {
6259 btrfs_tree_unlock(eb);
6260 path->locks[level] = 0;
6261 return 1;
6262 }
f82d02d9 6263 }
2c47e605 6264 }
f82d02d9 6265
2c47e605
YZ
6266 /* wc->stage == DROP_REFERENCE */
6267 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5d4f98a2 6268
2c47e605
YZ
6269 if (wc->refs[level] == 1) {
6270 if (level == 0) {
6271 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6272 ret = btrfs_dec_ref(trans, root, eb, 1);
6273 else
6274 ret = btrfs_dec_ref(trans, root, eb, 0);
6275 BUG_ON(ret);
6276 }
6277 /* make block locked assertion in clean_tree_block happy */
6278 if (!path->locks[level] &&
6279 btrfs_header_generation(eb) == trans->transid) {
6280 btrfs_tree_lock(eb);
6281 btrfs_set_lock_blocking(eb);
6282 path->locks[level] = 1;
6283 }
6284 clean_tree_block(trans, root, eb);
6285 }
6286
6287 if (eb == root->node) {
6288 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6289 parent = eb->start;
6290 else
6291 BUG_ON(root->root_key.objectid !=
6292 btrfs_header_owner(eb));
6293 } else {
6294 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6295 parent = path->nodes[level + 1]->start;
6296 else
6297 BUG_ON(root->root_key.objectid !=
6298 btrfs_header_owner(path->nodes[level + 1]));
f82d02d9 6299 }
f82d02d9 6300
f0486c68 6301 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
2c47e605
YZ
6302out:
6303 wc->refs[level] = 0;
6304 wc->flags[level] = 0;
f0486c68 6305 return 0;
2c47e605
YZ
6306}
6307
6308static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6309 struct btrfs_root *root,
6310 struct btrfs_path *path,
6311 struct walk_control *wc)
6312{
2c47e605 6313 int level = wc->level;
94fcca9f 6314 int lookup_info = 1;
2c47e605
YZ
6315 int ret;
6316
6317 while (level >= 0) {
94fcca9f 6318 ret = walk_down_proc(trans, root, path, wc, lookup_info);
2c47e605
YZ
6319 if (ret > 0)
6320 break;
6321
6322 if (level == 0)
6323 break;
6324
7a7965f8
YZ
6325 if (path->slots[level] >=
6326 btrfs_header_nritems(path->nodes[level]))
6327 break;
6328
94fcca9f 6329 ret = do_walk_down(trans, root, path, wc, &lookup_info);
1c4850e2
YZ
6330 if (ret > 0) {
6331 path->slots[level]++;
6332 continue;
90d2c51d
MX
6333 } else if (ret < 0)
6334 return ret;
1c4850e2 6335 level = wc->level;
f82d02d9 6336 }
f82d02d9
YZ
6337 return 0;
6338}
6339
d397712b 6340static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
98ed5174 6341 struct btrfs_root *root,
f82d02d9 6342 struct btrfs_path *path,
2c47e605 6343 struct walk_control *wc, int max_level)
20524f02 6344{
2c47e605 6345 int level = wc->level;
20524f02 6346 int ret;
9f3a7427 6347
2c47e605
YZ
6348 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6349 while (level < max_level && path->nodes[level]) {
6350 wc->level = level;
6351 if (path->slots[level] + 1 <
6352 btrfs_header_nritems(path->nodes[level])) {
6353 path->slots[level]++;
20524f02
CM
6354 return 0;
6355 } else {
2c47e605
YZ
6356 ret = walk_up_proc(trans, root, path, wc);
6357 if (ret > 0)
6358 return 0;
bd56b302 6359
2c47e605
YZ
6360 if (path->locks[level]) {
6361 btrfs_tree_unlock(path->nodes[level]);
6362 path->locks[level] = 0;
f82d02d9 6363 }
2c47e605
YZ
6364 free_extent_buffer(path->nodes[level]);
6365 path->nodes[level] = NULL;
6366 level++;
20524f02
CM
6367 }
6368 }
6369 return 1;
6370}
6371
9aca1d51 6372/*
2c47e605
YZ
6373 * drop a subvolume tree.
6374 *
6375 * this function traverses the tree freeing any blocks that only
6376 * referenced by the tree.
6377 *
6378 * when a shared tree block is found. this function decreases its
6379 * reference count by one. if update_ref is true, this function
6380 * also make sure backrefs for the shared block and all lower level
6381 * blocks are properly updated.
9aca1d51 6382 */
3fd0a558
YZ
6383int btrfs_drop_snapshot(struct btrfs_root *root,
6384 struct btrfs_block_rsv *block_rsv, int update_ref)
20524f02 6385{
5caf2a00 6386 struct btrfs_path *path;
2c47e605
YZ
6387 struct btrfs_trans_handle *trans;
6388 struct btrfs_root *tree_root = root->fs_info->tree_root;
9f3a7427 6389 struct btrfs_root_item *root_item = &root->root_item;
2c47e605
YZ
6390 struct walk_control *wc;
6391 struct btrfs_key key;
6392 int err = 0;
6393 int ret;
6394 int level;
20524f02 6395
5caf2a00
CM
6396 path = btrfs_alloc_path();
6397 BUG_ON(!path);
20524f02 6398
2c47e605
YZ
6399 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6400 BUG_ON(!wc);
6401
a22285a6 6402 trans = btrfs_start_transaction(tree_root, 0);
98d5dc13
TI
6403 BUG_ON(IS_ERR(trans));
6404
3fd0a558
YZ
6405 if (block_rsv)
6406 trans->block_rsv = block_rsv;
2c47e605 6407
9f3a7427 6408 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2c47e605 6409 level = btrfs_header_level(root->node);
5d4f98a2
YZ
6410 path->nodes[level] = btrfs_lock_root_node(root);
6411 btrfs_set_lock_blocking(path->nodes[level]);
9f3a7427 6412 path->slots[level] = 0;
5d4f98a2 6413 path->locks[level] = 1;
2c47e605
YZ
6414 memset(&wc->update_progress, 0,
6415 sizeof(wc->update_progress));
9f3a7427 6416 } else {
9f3a7427 6417 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2c47e605
YZ
6418 memcpy(&wc->update_progress, &key,
6419 sizeof(wc->update_progress));
6420
6702ed49 6421 level = root_item->drop_level;
2c47e605 6422 BUG_ON(level == 0);
6702ed49 6423 path->lowest_level = level;
2c47e605
YZ
6424 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6425 path->lowest_level = 0;
6426 if (ret < 0) {
6427 err = ret;
9f3a7427
CM
6428 goto out;
6429 }
1c4850e2 6430 WARN_ON(ret > 0);
2c47e605 6431
7d9eb12c
CM
6432 /*
6433 * unlock our path, this is safe because only this
6434 * function is allowed to delete this snapshot
6435 */
5d4f98a2 6436 btrfs_unlock_up_safe(path, 0);
2c47e605
YZ
6437
6438 level = btrfs_header_level(root->node);
6439 while (1) {
6440 btrfs_tree_lock(path->nodes[level]);
6441 btrfs_set_lock_blocking(path->nodes[level]);
6442
6443 ret = btrfs_lookup_extent_info(trans, root,
6444 path->nodes[level]->start,
6445 path->nodes[level]->len,
6446 &wc->refs[level],
6447 &wc->flags[level]);
6448 BUG_ON(ret);
6449 BUG_ON(wc->refs[level] == 0);
6450
6451 if (level == root_item->drop_level)
6452 break;
6453
6454 btrfs_tree_unlock(path->nodes[level]);
6455 WARN_ON(wc->refs[level] != 1);
6456 level--;
6457 }
9f3a7427 6458 }
2c47e605
YZ
6459
6460 wc->level = level;
6461 wc->shared_level = -1;
6462 wc->stage = DROP_REFERENCE;
6463 wc->update_ref = update_ref;
6464 wc->keep_locks = 0;
1c4850e2 6465 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
2c47e605 6466
d397712b 6467 while (1) {
2c47e605
YZ
6468 ret = walk_down_tree(trans, root, path, wc);
6469 if (ret < 0) {
6470 err = ret;
20524f02 6471 break;
2c47e605 6472 }
9aca1d51 6473
2c47e605
YZ
6474 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6475 if (ret < 0) {
6476 err = ret;
20524f02 6477 break;
2c47e605
YZ
6478 }
6479
6480 if (ret > 0) {
6481 BUG_ON(wc->stage != DROP_REFERENCE);
e7a84565
CM
6482 break;
6483 }
2c47e605
YZ
6484
6485 if (wc->stage == DROP_REFERENCE) {
6486 level = wc->level;
6487 btrfs_node_key(path->nodes[level],
6488 &root_item->drop_progress,
6489 path->slots[level]);
6490 root_item->drop_level = level;
6491 }
6492
6493 BUG_ON(wc->level == 0);
3fd0a558 6494 if (btrfs_should_end_transaction(trans, tree_root)) {
2c47e605
YZ
6495 ret = btrfs_update_root(trans, tree_root,
6496 &root->root_key,
6497 root_item);
6498 BUG_ON(ret);
6499
3fd0a558 6500 btrfs_end_transaction_throttle(trans, tree_root);
a22285a6 6501 trans = btrfs_start_transaction(tree_root, 0);
98d5dc13 6502 BUG_ON(IS_ERR(trans));
3fd0a558
YZ
6503 if (block_rsv)
6504 trans->block_rsv = block_rsv;
c3e69d58 6505 }
20524f02 6506 }
2c47e605
YZ
6507 btrfs_release_path(root, path);
6508 BUG_ON(err);
6509
6510 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6511 BUG_ON(ret);
6512
76dda93c
YZ
6513 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6514 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6515 NULL, NULL);
6516 BUG_ON(ret < 0);
6517 if (ret > 0) {
84cd948c
JB
6518 /* if we fail to delete the orphan item this time
6519 * around, it'll get picked up the next time.
6520 *
6521 * The most common failure here is just -ENOENT.
6522 */
6523 btrfs_del_orphan_item(trans, tree_root,
6524 root->root_key.objectid);
76dda93c
YZ
6525 }
6526 }
6527
6528 if (root->in_radix) {
6529 btrfs_free_fs_root(tree_root->fs_info, root);
6530 } else {
6531 free_extent_buffer(root->node);
6532 free_extent_buffer(root->commit_root);
6533 kfree(root);
6534 }
9f3a7427 6535out:
3fd0a558 6536 btrfs_end_transaction_throttle(trans, tree_root);
2c47e605 6537 kfree(wc);
5caf2a00 6538 btrfs_free_path(path);
2c47e605 6539 return err;
20524f02 6540}
9078a3e1 6541
2c47e605
YZ
6542/*
6543 * drop subtree rooted at tree block 'node'.
6544 *
6545 * NOTE: this function will unlock and release tree block 'node'
6546 */
f82d02d9
YZ
6547int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6548 struct btrfs_root *root,
6549 struct extent_buffer *node,
6550 struct extent_buffer *parent)
6551{
6552 struct btrfs_path *path;
2c47e605 6553 struct walk_control *wc;
f82d02d9
YZ
6554 int level;
6555 int parent_level;
6556 int ret = 0;
6557 int wret;
6558
2c47e605
YZ
6559 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6560
f82d02d9 6561 path = btrfs_alloc_path();
db5b493a
TI
6562 if (!path)
6563 return -ENOMEM;
f82d02d9 6564
2c47e605 6565 wc = kzalloc(sizeof(*wc), GFP_NOFS);
db5b493a
TI
6566 if (!wc) {
6567 btrfs_free_path(path);
6568 return -ENOMEM;
6569 }
2c47e605 6570
b9447ef8 6571 btrfs_assert_tree_locked(parent);
f82d02d9
YZ
6572 parent_level = btrfs_header_level(parent);
6573 extent_buffer_get(parent);
6574 path->nodes[parent_level] = parent;
6575 path->slots[parent_level] = btrfs_header_nritems(parent);
6576
b9447ef8 6577 btrfs_assert_tree_locked(node);
f82d02d9 6578 level = btrfs_header_level(node);
f82d02d9
YZ
6579 path->nodes[level] = node;
6580 path->slots[level] = 0;
2c47e605
YZ
6581 path->locks[level] = 1;
6582
6583 wc->refs[parent_level] = 1;
6584 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6585 wc->level = level;
6586 wc->shared_level = -1;
6587 wc->stage = DROP_REFERENCE;
6588 wc->update_ref = 0;
6589 wc->keep_locks = 1;
1c4850e2 6590 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
f82d02d9
YZ
6591
6592 while (1) {
2c47e605
YZ
6593 wret = walk_down_tree(trans, root, path, wc);
6594 if (wret < 0) {
f82d02d9 6595 ret = wret;
f82d02d9 6596 break;
2c47e605 6597 }
f82d02d9 6598
2c47e605 6599 wret = walk_up_tree(trans, root, path, wc, parent_level);
f82d02d9
YZ
6600 if (wret < 0)
6601 ret = wret;
6602 if (wret != 0)
6603 break;
6604 }
6605
2c47e605 6606 kfree(wc);
f82d02d9
YZ
6607 btrfs_free_path(path);
6608 return ret;
6609}
6610
5d4f98a2 6611#if 0
8e7bf94f
CM
6612static unsigned long calc_ra(unsigned long start, unsigned long last,
6613 unsigned long nr)
6614{
6615 return min(last, start + nr - 1);
6616}
6617
d397712b 6618static noinline int relocate_inode_pages(struct inode *inode, u64 start,
98ed5174 6619 u64 len)
edbd8d4e
CM
6620{
6621 u64 page_start;
6622 u64 page_end;
1a40e23b 6623 unsigned long first_index;
edbd8d4e 6624 unsigned long last_index;
edbd8d4e
CM
6625 unsigned long i;
6626 struct page *page;
d1310b2e 6627 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4313b399 6628 struct file_ra_state *ra;
3eaa2885 6629 struct btrfs_ordered_extent *ordered;
1a40e23b
ZY
6630 unsigned int total_read = 0;
6631 unsigned int total_dirty = 0;
6632 int ret = 0;
4313b399
CM
6633
6634 ra = kzalloc(sizeof(*ra), GFP_NOFS);
5df67083
TI
6635 if (!ra)
6636 return -ENOMEM;
edbd8d4e
CM
6637
6638 mutex_lock(&inode->i_mutex);
1a40e23b 6639 first_index = start >> PAGE_CACHE_SHIFT;
edbd8d4e
CM
6640 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6641
1a40e23b
ZY
6642 /* make sure the dirty trick played by the caller work */
6643 ret = invalidate_inode_pages2_range(inode->i_mapping,
6644 first_index, last_index);
6645 if (ret)
6646 goto out_unlock;
8e7bf94f 6647
4313b399 6648 file_ra_state_init(ra, inode->i_mapping);
edbd8d4e 6649
1a40e23b
ZY
6650 for (i = first_index ; i <= last_index; i++) {
6651 if (total_read % ra->ra_pages == 0) {
8e7bf94f 6652 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
1a40e23b 6653 calc_ra(i, last_index, ra->ra_pages));
8e7bf94f
CM
6654 }
6655 total_read++;
3eaa2885
CM
6656again:
6657 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
1a40e23b 6658 BUG_ON(1);
edbd8d4e 6659 page = grab_cache_page(inode->i_mapping, i);
a061fc8d 6660 if (!page) {
1a40e23b 6661 ret = -ENOMEM;
edbd8d4e 6662 goto out_unlock;
a061fc8d 6663 }
edbd8d4e
CM
6664 if (!PageUptodate(page)) {
6665 btrfs_readpage(NULL, page);
6666 lock_page(page);
6667 if (!PageUptodate(page)) {
6668 unlock_page(page);
6669 page_cache_release(page);
1a40e23b 6670 ret = -EIO;
edbd8d4e
CM
6671 goto out_unlock;
6672 }
6673 }
ec44a35c 6674 wait_on_page_writeback(page);
3eaa2885 6675
edbd8d4e
CM
6676 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6677 page_end = page_start + PAGE_CACHE_SIZE - 1;
d1310b2e 6678 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e 6679
3eaa2885
CM
6680 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6681 if (ordered) {
6682 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6683 unlock_page(page);
6684 page_cache_release(page);
6685 btrfs_start_ordered_extent(inode, ordered, 1);
6686 btrfs_put_ordered_extent(ordered);
6687 goto again;
6688 }
6689 set_page_extent_mapped(page);
6690
1a40e23b
ZY
6691 if (i == first_index)
6692 set_extent_bits(io_tree, page_start, page_end,
6693 EXTENT_BOUNDARY, GFP_NOFS);
1f80e4db 6694 btrfs_set_extent_delalloc(inode, page_start, page_end);
1a40e23b 6695
a061fc8d 6696 set_page_dirty(page);
1a40e23b 6697 total_dirty++;
edbd8d4e 6698
d1310b2e 6699 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e
CM
6700 unlock_page(page);
6701 page_cache_release(page);
6702 }
6703
6704out_unlock:
ec44a35c 6705 kfree(ra);
edbd8d4e 6706 mutex_unlock(&inode->i_mutex);
1a40e23b
ZY
6707 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6708 return ret;
edbd8d4e
CM
6709}
6710
d397712b 6711static noinline int relocate_data_extent(struct inode *reloc_inode,
1a40e23b
ZY
6712 struct btrfs_key *extent_key,
6713 u64 offset)
6714{
6715 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6716 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6717 struct extent_map *em;
6643558d
YZ
6718 u64 start = extent_key->objectid - offset;
6719 u64 end = start + extent_key->offset - 1;
bf4ef679 6720
1a40e23b 6721 em = alloc_extent_map(GFP_NOFS);
c26a9203 6722 BUG_ON(!em);
bf4ef679 6723
6643558d 6724 em->start = start;
1a40e23b 6725 em->len = extent_key->offset;
c8b97818 6726 em->block_len = extent_key->offset;
1a40e23b
ZY
6727 em->block_start = extent_key->objectid;
6728 em->bdev = root->fs_info->fs_devices->latest_bdev;
6729 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6730
6731 /* setup extent map to cheat btrfs_readpage */
6643558d 6732 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
1a40e23b
ZY
6733 while (1) {
6734 int ret;
890871be 6735 write_lock(&em_tree->lock);
1a40e23b 6736 ret = add_extent_mapping(em_tree, em);
890871be 6737 write_unlock(&em_tree->lock);
1a40e23b
ZY
6738 if (ret != -EEXIST) {
6739 free_extent_map(em);
bf4ef679
CM
6740 break;
6741 }
6643558d 6742 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
bf4ef679 6743 }
6643558d 6744 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
bf4ef679 6745
6643558d 6746 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
1a40e23b 6747}
edbd8d4e 6748
1a40e23b
ZY
6749struct btrfs_ref_path {
6750 u64 extent_start;
6751 u64 nodes[BTRFS_MAX_LEVEL];
6752 u64 root_objectid;
6753 u64 root_generation;
6754 u64 owner_objectid;
1a40e23b
ZY
6755 u32 num_refs;
6756 int lowest_level;
6757 int current_level;
f82d02d9
YZ
6758 int shared_level;
6759
6760 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6761 u64 new_nodes[BTRFS_MAX_LEVEL];
1a40e23b 6762};
7d9eb12c 6763
1a40e23b 6764struct disk_extent {
c8b97818 6765 u64 ram_bytes;
1a40e23b
ZY
6766 u64 disk_bytenr;
6767 u64 disk_num_bytes;
6768 u64 offset;
6769 u64 num_bytes;
c8b97818
CM
6770 u8 compression;
6771 u8 encryption;
6772 u16 other_encoding;
1a40e23b 6773};
4313b399 6774
1a40e23b
ZY
6775static int is_cowonly_root(u64 root_objectid)
6776{
6777 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6778 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6779 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6780 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
0403e47e
YZ
6781 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6782 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
1a40e23b
ZY
6783 return 1;
6784 return 0;
6785}
edbd8d4e 6786
d397712b 6787static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6788 struct btrfs_root *extent_root,
6789 struct btrfs_ref_path *ref_path,
6790 int first_time)
6791{
6792 struct extent_buffer *leaf;
6793 struct btrfs_path *path;
6794 struct btrfs_extent_ref *ref;
6795 struct btrfs_key key;
6796 struct btrfs_key found_key;
6797 u64 bytenr;
6798 u32 nritems;
6799 int level;
6800 int ret = 1;
edbd8d4e 6801
1a40e23b
ZY
6802 path = btrfs_alloc_path();
6803 if (!path)
6804 return -ENOMEM;
bf4ef679 6805
1a40e23b
ZY
6806 if (first_time) {
6807 ref_path->lowest_level = -1;
6808 ref_path->current_level = -1;
f82d02d9 6809 ref_path->shared_level = -1;
1a40e23b
ZY
6810 goto walk_up;
6811 }
6812walk_down:
6813 level = ref_path->current_level - 1;
6814 while (level >= -1) {
6815 u64 parent;
6816 if (level < ref_path->lowest_level)
6817 break;
bf4ef679 6818
d397712b 6819 if (level >= 0)
1a40e23b 6820 bytenr = ref_path->nodes[level];
d397712b 6821 else
1a40e23b 6822 bytenr = ref_path->extent_start;
1a40e23b 6823 BUG_ON(bytenr == 0);
bf4ef679 6824
1a40e23b
ZY
6825 parent = ref_path->nodes[level + 1];
6826 ref_path->nodes[level + 1] = 0;
6827 ref_path->current_level = level;
6828 BUG_ON(parent == 0);
0ef3e66b 6829
1a40e23b
ZY
6830 key.objectid = bytenr;
6831 key.offset = parent + 1;
6832 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 6833
1a40e23b
ZY
6834 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6835 if (ret < 0)
edbd8d4e 6836 goto out;
1a40e23b 6837 BUG_ON(ret == 0);
7d9eb12c 6838
1a40e23b
ZY
6839 leaf = path->nodes[0];
6840 nritems = btrfs_header_nritems(leaf);
6841 if (path->slots[0] >= nritems) {
6842 ret = btrfs_next_leaf(extent_root, path);
6843 if (ret < 0)
6844 goto out;
6845 if (ret > 0)
6846 goto next;
6847 leaf = path->nodes[0];
6848 }
0ef3e66b 6849
1a40e23b
ZY
6850 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6851 if (found_key.objectid == bytenr &&
f82d02d9
YZ
6852 found_key.type == BTRFS_EXTENT_REF_KEY) {
6853 if (level < ref_path->shared_level)
6854 ref_path->shared_level = level;
1a40e23b 6855 goto found;
f82d02d9 6856 }
1a40e23b
ZY
6857next:
6858 level--;
6859 btrfs_release_path(extent_root, path);
d899e052 6860 cond_resched();
1a40e23b
ZY
6861 }
6862 /* reached lowest level */
6863 ret = 1;
6864 goto out;
6865walk_up:
6866 level = ref_path->current_level;
6867 while (level < BTRFS_MAX_LEVEL - 1) {
6868 u64 ref_objectid;
d397712b
CM
6869
6870 if (level >= 0)
1a40e23b 6871 bytenr = ref_path->nodes[level];
d397712b 6872 else
1a40e23b 6873 bytenr = ref_path->extent_start;
d397712b 6874
1a40e23b 6875 BUG_ON(bytenr == 0);
edbd8d4e 6876
1a40e23b
ZY
6877 key.objectid = bytenr;
6878 key.offset = 0;
6879 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 6880
1a40e23b
ZY
6881 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6882 if (ret < 0)
6883 goto out;
edbd8d4e 6884
1a40e23b
ZY
6885 leaf = path->nodes[0];
6886 nritems = btrfs_header_nritems(leaf);
6887 if (path->slots[0] >= nritems) {
6888 ret = btrfs_next_leaf(extent_root, path);
6889 if (ret < 0)
6890 goto out;
6891 if (ret > 0) {
6892 /* the extent was freed by someone */
6893 if (ref_path->lowest_level == level)
6894 goto out;
6895 btrfs_release_path(extent_root, path);
6896 goto walk_down;
6897 }
6898 leaf = path->nodes[0];
6899 }
edbd8d4e 6900
1a40e23b
ZY
6901 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6902 if (found_key.objectid != bytenr ||
6903 found_key.type != BTRFS_EXTENT_REF_KEY) {
6904 /* the extent was freed by someone */
6905 if (ref_path->lowest_level == level) {
6906 ret = 1;
6907 goto out;
6908 }
6909 btrfs_release_path(extent_root, path);
6910 goto walk_down;
6911 }
6912found:
6913 ref = btrfs_item_ptr(leaf, path->slots[0],
6914 struct btrfs_extent_ref);
6915 ref_objectid = btrfs_ref_objectid(leaf, ref);
6916 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6917 if (first_time) {
6918 level = (int)ref_objectid;
6919 BUG_ON(level >= BTRFS_MAX_LEVEL);
6920 ref_path->lowest_level = level;
6921 ref_path->current_level = level;
6922 ref_path->nodes[level] = bytenr;
6923 } else {
6924 WARN_ON(ref_objectid != level);
6925 }
6926 } else {
6927 WARN_ON(level != -1);
6928 }
6929 first_time = 0;
bf4ef679 6930
1a40e23b
ZY
6931 if (ref_path->lowest_level == level) {
6932 ref_path->owner_objectid = ref_objectid;
1a40e23b
ZY
6933 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6934 }
bf4ef679 6935
7d9eb12c 6936 /*
1a40e23b
ZY
6937 * the block is tree root or the block isn't in reference
6938 * counted tree.
7d9eb12c 6939 */
1a40e23b
ZY
6940 if (found_key.objectid == found_key.offset ||
6941 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6942 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6943 ref_path->root_generation =
6944 btrfs_ref_generation(leaf, ref);
6945 if (level < 0) {
6946 /* special reference from the tree log */
6947 ref_path->nodes[0] = found_key.offset;
6948 ref_path->current_level = 0;
6949 }
6950 ret = 0;
6951 goto out;
6952 }
7d9eb12c 6953
1a40e23b
ZY
6954 level++;
6955 BUG_ON(ref_path->nodes[level] != 0);
6956 ref_path->nodes[level] = found_key.offset;
6957 ref_path->current_level = level;
bf4ef679 6958
1a40e23b
ZY
6959 /*
6960 * the reference was created in the running transaction,
6961 * no need to continue walking up.
6962 */
6963 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6964 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6965 ref_path->root_generation =
6966 btrfs_ref_generation(leaf, ref);
6967 ret = 0;
6968 goto out;
7d9eb12c
CM
6969 }
6970
1a40e23b 6971 btrfs_release_path(extent_root, path);
d899e052 6972 cond_resched();
7d9eb12c 6973 }
1a40e23b
ZY
6974 /* reached max tree level, but no tree root found. */
6975 BUG();
edbd8d4e 6976out:
1a40e23b
ZY
6977 btrfs_free_path(path);
6978 return ret;
edbd8d4e
CM
6979}
6980
1a40e23b
ZY
6981static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6982 struct btrfs_root *extent_root,
6983 struct btrfs_ref_path *ref_path,
6984 u64 extent_start)
a061fc8d 6985{
1a40e23b
ZY
6986 memset(ref_path, 0, sizeof(*ref_path));
6987 ref_path->extent_start = extent_start;
a061fc8d 6988
1a40e23b 6989 return __next_ref_path(trans, extent_root, ref_path, 1);
a061fc8d
CM
6990}
6991
1a40e23b
ZY
6992static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6993 struct btrfs_root *extent_root,
6994 struct btrfs_ref_path *ref_path)
edbd8d4e 6995{
1a40e23b
ZY
6996 return __next_ref_path(trans, extent_root, ref_path, 0);
6997}
6998
d397712b 6999static noinline int get_new_locations(struct inode *reloc_inode,
1a40e23b
ZY
7000 struct btrfs_key *extent_key,
7001 u64 offset, int no_fragment,
7002 struct disk_extent **extents,
7003 int *nr_extents)
7004{
7005 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
7006 struct btrfs_path *path;
7007 struct btrfs_file_extent_item *fi;
edbd8d4e 7008 struct extent_buffer *leaf;
1a40e23b
ZY
7009 struct disk_extent *exts = *extents;
7010 struct btrfs_key found_key;
7011 u64 cur_pos;
7012 u64 last_byte;
edbd8d4e 7013 u32 nritems;
1a40e23b
ZY
7014 int nr = 0;
7015 int max = *nr_extents;
7016 int ret;
edbd8d4e 7017
1a40e23b
ZY
7018 WARN_ON(!no_fragment && *extents);
7019 if (!exts) {
7020 max = 1;
7021 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
7022 if (!exts)
7023 return -ENOMEM;
a061fc8d 7024 }
edbd8d4e 7025
1a40e23b 7026 path = btrfs_alloc_path();
db5b493a
TI
7027 if (!path) {
7028 if (exts != *extents)
7029 kfree(exts);
7030 return -ENOMEM;
7031 }
edbd8d4e 7032
1a40e23b
ZY
7033 cur_pos = extent_key->objectid - offset;
7034 last_byte = extent_key->objectid + extent_key->offset;
7035 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
7036 cur_pos, 0);
7037 if (ret < 0)
7038 goto out;
7039 if (ret > 0) {
7040 ret = -ENOENT;
7041 goto out;
7042 }
edbd8d4e 7043
1a40e23b 7044 while (1) {
edbd8d4e
CM
7045 leaf = path->nodes[0];
7046 nritems = btrfs_header_nritems(leaf);
1a40e23b
ZY
7047 if (path->slots[0] >= nritems) {
7048 ret = btrfs_next_leaf(root, path);
a061fc8d
CM
7049 if (ret < 0)
7050 goto out;
1a40e23b
ZY
7051 if (ret > 0)
7052 break;
bf4ef679 7053 leaf = path->nodes[0];
a061fc8d 7054 }
edbd8d4e
CM
7055
7056 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1a40e23b
ZY
7057 if (found_key.offset != cur_pos ||
7058 found_key.type != BTRFS_EXTENT_DATA_KEY ||
7059 found_key.objectid != reloc_inode->i_ino)
edbd8d4e
CM
7060 break;
7061
1a40e23b
ZY
7062 fi = btrfs_item_ptr(leaf, path->slots[0],
7063 struct btrfs_file_extent_item);
7064 if (btrfs_file_extent_type(leaf, fi) !=
7065 BTRFS_FILE_EXTENT_REG ||
7066 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
edbd8d4e 7067 break;
1a40e23b
ZY
7068
7069 if (nr == max) {
7070 struct disk_extent *old = exts;
7071 max *= 2;
7072 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
dac97e51
YS
7073 if (!exts) {
7074 ret = -ENOMEM;
7075 goto out;
7076 }
1a40e23b
ZY
7077 memcpy(exts, old, sizeof(*exts) * nr);
7078 if (old != *extents)
7079 kfree(old);
a061fc8d 7080 }
edbd8d4e 7081
1a40e23b
ZY
7082 exts[nr].disk_bytenr =
7083 btrfs_file_extent_disk_bytenr(leaf, fi);
7084 exts[nr].disk_num_bytes =
7085 btrfs_file_extent_disk_num_bytes(leaf, fi);
7086 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
7087 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
c8b97818
CM
7088 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7089 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
7090 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
7091 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
7092 fi);
d899e052
YZ
7093 BUG_ON(exts[nr].offset > 0);
7094 BUG_ON(exts[nr].compression || exts[nr].encryption);
7095 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
edbd8d4e 7096
1a40e23b
ZY
7097 cur_pos += exts[nr].num_bytes;
7098 nr++;
7099
7100 if (cur_pos + offset >= last_byte)
7101 break;
7102
7103 if (no_fragment) {
7104 ret = 1;
edbd8d4e 7105 goto out;
1a40e23b
ZY
7106 }
7107 path->slots[0]++;
7108 }
7109
1f80e4db 7110 BUG_ON(cur_pos + offset > last_byte);
1a40e23b
ZY
7111 if (cur_pos + offset < last_byte) {
7112 ret = -ENOENT;
7113 goto out;
edbd8d4e
CM
7114 }
7115 ret = 0;
7116out:
1a40e23b
ZY
7117 btrfs_free_path(path);
7118 if (ret) {
7119 if (exts != *extents)
7120 kfree(exts);
7121 } else {
7122 *extents = exts;
7123 *nr_extents = nr;
7124 }
7125 return ret;
7126}
7127
d397712b 7128static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7129 struct btrfs_root *root,
7130 struct btrfs_path *path,
7131 struct btrfs_key *extent_key,
7132 struct btrfs_key *leaf_key,
7133 struct btrfs_ref_path *ref_path,
7134 struct disk_extent *new_extents,
7135 int nr_extents)
7136{
7137 struct extent_buffer *leaf;
7138 struct btrfs_file_extent_item *fi;
7139 struct inode *inode = NULL;
7140 struct btrfs_key key;
7141 u64 lock_start = 0;
7142 u64 lock_end = 0;
7143 u64 num_bytes;
7144 u64 ext_offset;
86288a19 7145 u64 search_end = (u64)-1;
1a40e23b 7146 u32 nritems;
3bb1a1bc 7147 int nr_scaned = 0;
1a40e23b 7148 int extent_locked = 0;
d899e052 7149 int extent_type;
1a40e23b
ZY
7150 int ret;
7151
3bb1a1bc 7152 memcpy(&key, leaf_key, sizeof(key));
1a40e23b 7153 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3bb1a1bc
YZ
7154 if (key.objectid < ref_path->owner_objectid ||
7155 (key.objectid == ref_path->owner_objectid &&
7156 key.type < BTRFS_EXTENT_DATA_KEY)) {
7157 key.objectid = ref_path->owner_objectid;
7158 key.type = BTRFS_EXTENT_DATA_KEY;
7159 key.offset = 0;
7160 }
1a40e23b
ZY
7161 }
7162
7163 while (1) {
7164 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
7165 if (ret < 0)
7166 goto out;
7167
7168 leaf = path->nodes[0];
7169 nritems = btrfs_header_nritems(leaf);
7170next:
7171 if (extent_locked && ret > 0) {
7172 /*
7173 * the file extent item was modified by someone
7174 * before the extent got locked.
7175 */
1a40e23b
ZY
7176 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7177 lock_end, GFP_NOFS);
7178 extent_locked = 0;
7179 }
7180
7181 if (path->slots[0] >= nritems) {
3bb1a1bc 7182 if (++nr_scaned > 2)
1a40e23b
ZY
7183 break;
7184
7185 BUG_ON(extent_locked);
7186 ret = btrfs_next_leaf(root, path);
7187 if (ret < 0)
7188 goto out;
7189 if (ret > 0)
7190 break;
7191 leaf = path->nodes[0];
7192 nritems = btrfs_header_nritems(leaf);
7193 }
7194
7195 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7196
7197 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
7198 if ((key.objectid > ref_path->owner_objectid) ||
7199 (key.objectid == ref_path->owner_objectid &&
7200 key.type > BTRFS_EXTENT_DATA_KEY) ||
86288a19 7201 key.offset >= search_end)
1a40e23b
ZY
7202 break;
7203 }
7204
7205 if (inode && key.objectid != inode->i_ino) {
7206 BUG_ON(extent_locked);
7207 btrfs_release_path(root, path);
7208 mutex_unlock(&inode->i_mutex);
7209 iput(inode);
7210 inode = NULL;
7211 continue;
7212 }
7213
7214 if (key.type != BTRFS_EXTENT_DATA_KEY) {
7215 path->slots[0]++;
7216 ret = 1;
7217 goto next;
7218 }
7219 fi = btrfs_item_ptr(leaf, path->slots[0],
7220 struct btrfs_file_extent_item);
d899e052
YZ
7221 extent_type = btrfs_file_extent_type(leaf, fi);
7222 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
7223 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
1a40e23b
ZY
7224 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
7225 extent_key->objectid)) {
7226 path->slots[0]++;
7227 ret = 1;
7228 goto next;
7229 }
7230
7231 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7232 ext_offset = btrfs_file_extent_offset(leaf, fi);
7233
86288a19
YZ
7234 if (search_end == (u64)-1) {
7235 search_end = key.offset - ext_offset +
7236 btrfs_file_extent_ram_bytes(leaf, fi);
7237 }
1a40e23b
ZY
7238
7239 if (!extent_locked) {
7240 lock_start = key.offset;
7241 lock_end = lock_start + num_bytes - 1;
7242 } else {
6643558d
YZ
7243 if (lock_start > key.offset ||
7244 lock_end + 1 < key.offset + num_bytes) {
7245 unlock_extent(&BTRFS_I(inode)->io_tree,
7246 lock_start, lock_end, GFP_NOFS);
7247 extent_locked = 0;
7248 }
1a40e23b
ZY
7249 }
7250
7251 if (!inode) {
7252 btrfs_release_path(root, path);
7253
7254 inode = btrfs_iget_locked(root->fs_info->sb,
7255 key.objectid, root);
7256 if (inode->i_state & I_NEW) {
7257 BTRFS_I(inode)->root = root;
7258 BTRFS_I(inode)->location.objectid =
7259 key.objectid;
7260 BTRFS_I(inode)->location.type =
7261 BTRFS_INODE_ITEM_KEY;
7262 BTRFS_I(inode)->location.offset = 0;
7263 btrfs_read_locked_inode(inode);
7264 unlock_new_inode(inode);
7265 }
7266 /*
7267 * some code call btrfs_commit_transaction while
7268 * holding the i_mutex, so we can't use mutex_lock
7269 * here.
7270 */
7271 if (is_bad_inode(inode) ||
7272 !mutex_trylock(&inode->i_mutex)) {
7273 iput(inode);
7274 inode = NULL;
7275 key.offset = (u64)-1;
7276 goto skip;
7277 }
7278 }
7279
7280 if (!extent_locked) {
7281 struct btrfs_ordered_extent *ordered;
7282
7283 btrfs_release_path(root, path);
7284
7285 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7286 lock_end, GFP_NOFS);
7287 ordered = btrfs_lookup_first_ordered_extent(inode,
7288 lock_end);
7289 if (ordered &&
7290 ordered->file_offset <= lock_end &&
7291 ordered->file_offset + ordered->len > lock_start) {
7292 unlock_extent(&BTRFS_I(inode)->io_tree,
7293 lock_start, lock_end, GFP_NOFS);
7294 btrfs_start_ordered_extent(inode, ordered, 1);
7295 btrfs_put_ordered_extent(ordered);
7296 key.offset += num_bytes;
7297 goto skip;
7298 }
7299 if (ordered)
7300 btrfs_put_ordered_extent(ordered);
7301
1a40e23b
ZY
7302 extent_locked = 1;
7303 continue;
7304 }
7305
7306 if (nr_extents == 1) {
7307 /* update extent pointer in place */
1a40e23b
ZY
7308 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7309 new_extents[0].disk_bytenr);
7310 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7311 new_extents[0].disk_num_bytes);
1a40e23b
ZY
7312 btrfs_mark_buffer_dirty(leaf);
7313
7314 btrfs_drop_extent_cache(inode, key.offset,
7315 key.offset + num_bytes - 1, 0);
7316
7317 ret = btrfs_inc_extent_ref(trans, root,
7318 new_extents[0].disk_bytenr,
7319 new_extents[0].disk_num_bytes,
7320 leaf->start,
7321 root->root_key.objectid,
7322 trans->transid,
3bb1a1bc 7323 key.objectid);
1a40e23b
ZY
7324 BUG_ON(ret);
7325
7326 ret = btrfs_free_extent(trans, root,
7327 extent_key->objectid,
7328 extent_key->offset,
7329 leaf->start,
7330 btrfs_header_owner(leaf),
7331 btrfs_header_generation(leaf),
3bb1a1bc 7332 key.objectid, 0);
1a40e23b
ZY
7333 BUG_ON(ret);
7334
7335 btrfs_release_path(root, path);
7336 key.offset += num_bytes;
7337 } else {
d899e052
YZ
7338 BUG_ON(1);
7339#if 0
1a40e23b
ZY
7340 u64 alloc_hint;
7341 u64 extent_len;
7342 int i;
7343 /*
7344 * drop old extent pointer at first, then insert the
7345 * new pointers one bye one
7346 */
7347 btrfs_release_path(root, path);
7348 ret = btrfs_drop_extents(trans, root, inode, key.offset,
7349 key.offset + num_bytes,
7350 key.offset, &alloc_hint);
7351 BUG_ON(ret);
7352
7353 for (i = 0; i < nr_extents; i++) {
7354 if (ext_offset >= new_extents[i].num_bytes) {
7355 ext_offset -= new_extents[i].num_bytes;
7356 continue;
7357 }
7358 extent_len = min(new_extents[i].num_bytes -
7359 ext_offset, num_bytes);
7360
7361 ret = btrfs_insert_empty_item(trans, root,
7362 path, &key,
7363 sizeof(*fi));
7364 BUG_ON(ret);
7365
7366 leaf = path->nodes[0];
7367 fi = btrfs_item_ptr(leaf, path->slots[0],
7368 struct btrfs_file_extent_item);
7369 btrfs_set_file_extent_generation(leaf, fi,
7370 trans->transid);
7371 btrfs_set_file_extent_type(leaf, fi,
7372 BTRFS_FILE_EXTENT_REG);
7373 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7374 new_extents[i].disk_bytenr);
7375 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7376 new_extents[i].disk_num_bytes);
c8b97818
CM
7377 btrfs_set_file_extent_ram_bytes(leaf, fi,
7378 new_extents[i].ram_bytes);
7379
7380 btrfs_set_file_extent_compression(leaf, fi,
7381 new_extents[i].compression);
7382 btrfs_set_file_extent_encryption(leaf, fi,
7383 new_extents[i].encryption);
7384 btrfs_set_file_extent_other_encoding(leaf, fi,
7385 new_extents[i].other_encoding);
7386
1a40e23b
ZY
7387 btrfs_set_file_extent_num_bytes(leaf, fi,
7388 extent_len);
7389 ext_offset += new_extents[i].offset;
7390 btrfs_set_file_extent_offset(leaf, fi,
7391 ext_offset);
7392 btrfs_mark_buffer_dirty(leaf);
7393
7394 btrfs_drop_extent_cache(inode, key.offset,
7395 key.offset + extent_len - 1, 0);
7396
7397 ret = btrfs_inc_extent_ref(trans, root,
7398 new_extents[i].disk_bytenr,
7399 new_extents[i].disk_num_bytes,
7400 leaf->start,
7401 root->root_key.objectid,
3bb1a1bc 7402 trans->transid, key.objectid);
1a40e23b
ZY
7403 BUG_ON(ret);
7404 btrfs_release_path(root, path);
7405
a76a3cd4 7406 inode_add_bytes(inode, extent_len);
1a40e23b
ZY
7407
7408 ext_offset = 0;
7409 num_bytes -= extent_len;
7410 key.offset += extent_len;
7411
7412 if (num_bytes == 0)
7413 break;
7414 }
7415 BUG_ON(i >= nr_extents);
d899e052 7416#endif
1a40e23b
ZY
7417 }
7418
7419 if (extent_locked) {
1a40e23b
ZY
7420 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7421 lock_end, GFP_NOFS);
7422 extent_locked = 0;
7423 }
7424skip:
7425 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
86288a19 7426 key.offset >= search_end)
1a40e23b
ZY
7427 break;
7428
7429 cond_resched();
7430 }
7431 ret = 0;
7432out:
7433 btrfs_release_path(root, path);
7434 if (inode) {
7435 mutex_unlock(&inode->i_mutex);
7436 if (extent_locked) {
1a40e23b
ZY
7437 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7438 lock_end, GFP_NOFS);
7439 }
7440 iput(inode);
7441 }
7442 return ret;
7443}
7444
1a40e23b
ZY
7445int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7446 struct btrfs_root *root,
7447 struct extent_buffer *buf, u64 orig_start)
7448{
7449 int level;
7450 int ret;
7451
7452 BUG_ON(btrfs_header_generation(buf) != trans->transid);
7453 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7454
7455 level = btrfs_header_level(buf);
7456 if (level == 0) {
7457 struct btrfs_leaf_ref *ref;
7458 struct btrfs_leaf_ref *orig_ref;
7459
7460 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7461 if (!orig_ref)
7462 return -ENOENT;
7463
7464 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7465 if (!ref) {
7466 btrfs_free_leaf_ref(root, orig_ref);
7467 return -ENOMEM;
7468 }
7469
7470 ref->nritems = orig_ref->nritems;
7471 memcpy(ref->extents, orig_ref->extents,
7472 sizeof(ref->extents[0]) * ref->nritems);
7473
7474 btrfs_free_leaf_ref(root, orig_ref);
7475
7476 ref->root_gen = trans->transid;
7477 ref->bytenr = buf->start;
7478 ref->owner = btrfs_header_owner(buf);
7479 ref->generation = btrfs_header_generation(buf);
bd56b302 7480
1a40e23b
ZY
7481 ret = btrfs_add_leaf_ref(root, ref, 0);
7482 WARN_ON(ret);
7483 btrfs_free_leaf_ref(root, ref);
7484 }
7485 return 0;
7486}
7487
d397712b 7488static noinline int invalidate_extent_cache(struct btrfs_root *root,
1a40e23b
ZY
7489 struct extent_buffer *leaf,
7490 struct btrfs_block_group_cache *group,
7491 struct btrfs_root *target_root)
7492{
7493 struct btrfs_key key;
7494 struct inode *inode = NULL;
7495 struct btrfs_file_extent_item *fi;
2ac55d41 7496 struct extent_state *cached_state = NULL;
1a40e23b
ZY
7497 u64 num_bytes;
7498 u64 skip_objectid = 0;
7499 u32 nritems;
7500 u32 i;
7501
7502 nritems = btrfs_header_nritems(leaf);
7503 for (i = 0; i < nritems; i++) {
7504 btrfs_item_key_to_cpu(leaf, &key, i);
7505 if (key.objectid == skip_objectid ||
7506 key.type != BTRFS_EXTENT_DATA_KEY)
7507 continue;
7508 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7509 if (btrfs_file_extent_type(leaf, fi) ==
7510 BTRFS_FILE_EXTENT_INLINE)
7511 continue;
7512 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7513 continue;
7514 if (!inode || inode->i_ino != key.objectid) {
7515 iput(inode);
7516 inode = btrfs_ilookup(target_root->fs_info->sb,
7517 key.objectid, target_root, 1);
7518 }
7519 if (!inode) {
7520 skip_objectid = key.objectid;
7521 continue;
7522 }
7523 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7524
2ac55d41
JB
7525 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7526 key.offset + num_bytes - 1, 0, &cached_state,
7527 GFP_NOFS);
1a40e23b
ZY
7528 btrfs_drop_extent_cache(inode, key.offset,
7529 key.offset + num_bytes - 1, 1);
2ac55d41
JB
7530 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7531 key.offset + num_bytes - 1, &cached_state,
7532 GFP_NOFS);
1a40e23b
ZY
7533 cond_resched();
7534 }
7535 iput(inode);
7536 return 0;
7537}
7538
d397712b 7539static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7540 struct btrfs_root *root,
7541 struct extent_buffer *leaf,
7542 struct btrfs_block_group_cache *group,
7543 struct inode *reloc_inode)
7544{
7545 struct btrfs_key key;
7546 struct btrfs_key extent_key;
7547 struct btrfs_file_extent_item *fi;
7548 struct btrfs_leaf_ref *ref;
7549 struct disk_extent *new_extent;
7550 u64 bytenr;
7551 u64 num_bytes;
7552 u32 nritems;
7553 u32 i;
7554 int ext_index;
7555 int nr_extent;
7556 int ret;
7557
7558 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
db5b493a
TI
7559 if (!new_extent)
7560 return -ENOMEM;
1a40e23b
ZY
7561
7562 ref = btrfs_lookup_leaf_ref(root, leaf->start);
7563 BUG_ON(!ref);
7564
7565 ext_index = -1;
7566 nritems = btrfs_header_nritems(leaf);
7567 for (i = 0; i < nritems; i++) {
7568 btrfs_item_key_to_cpu(leaf, &key, i);
7569 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7570 continue;
7571 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7572 if (btrfs_file_extent_type(leaf, fi) ==
7573 BTRFS_FILE_EXTENT_INLINE)
7574 continue;
7575 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7576 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7577 if (bytenr == 0)
7578 continue;
7579
7580 ext_index++;
7581 if (bytenr >= group->key.objectid + group->key.offset ||
7582 bytenr + num_bytes <= group->key.objectid)
7583 continue;
7584
7585 extent_key.objectid = bytenr;
7586 extent_key.offset = num_bytes;
7587 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7588 nr_extent = 1;
7589 ret = get_new_locations(reloc_inode, &extent_key,
7590 group->key.objectid, 1,
7591 &new_extent, &nr_extent);
7592 if (ret > 0)
7593 continue;
7594 BUG_ON(ret < 0);
7595
7596 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7597 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7598 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7599 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7600
1a40e23b
ZY
7601 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7602 new_extent->disk_bytenr);
7603 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7604 new_extent->disk_num_bytes);
1a40e23b
ZY
7605 btrfs_mark_buffer_dirty(leaf);
7606
7607 ret = btrfs_inc_extent_ref(trans, root,
7608 new_extent->disk_bytenr,
7609 new_extent->disk_num_bytes,
7610 leaf->start,
7611 root->root_key.objectid,
3bb1a1bc 7612 trans->transid, key.objectid);
1a40e23b 7613 BUG_ON(ret);
56bec294 7614
1a40e23b
ZY
7615 ret = btrfs_free_extent(trans, root,
7616 bytenr, num_bytes, leaf->start,
7617 btrfs_header_owner(leaf),
7618 btrfs_header_generation(leaf),
3bb1a1bc 7619 key.objectid, 0);
1a40e23b
ZY
7620 BUG_ON(ret);
7621 cond_resched();
7622 }
7623 kfree(new_extent);
7624 BUG_ON(ext_index + 1 != ref->nritems);
7625 btrfs_free_leaf_ref(root, ref);
7626 return 0;
7627}
7628
f82d02d9
YZ
7629int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7630 struct btrfs_root *root)
1a40e23b
ZY
7631{
7632 struct btrfs_root *reloc_root;
f82d02d9 7633 int ret;
1a40e23b
ZY
7634
7635 if (root->reloc_root) {
7636 reloc_root = root->reloc_root;
7637 root->reloc_root = NULL;
7638 list_add(&reloc_root->dead_list,
7639 &root->fs_info->dead_reloc_roots);
f82d02d9
YZ
7640
7641 btrfs_set_root_bytenr(&reloc_root->root_item,
7642 reloc_root->node->start);
7643 btrfs_set_root_level(&root->root_item,
7644 btrfs_header_level(reloc_root->node));
7645 memset(&reloc_root->root_item.drop_progress, 0,
7646 sizeof(struct btrfs_disk_key));
7647 reloc_root->root_item.drop_level = 0;
7648
7649 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7650 &reloc_root->root_key,
7651 &reloc_root->root_item);
7652 BUG_ON(ret);
1a40e23b
ZY
7653 }
7654 return 0;
7655}
7656
7657int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7658{
7659 struct btrfs_trans_handle *trans;
7660 struct btrfs_root *reloc_root;
7661 struct btrfs_root *prev_root = NULL;
7662 struct list_head dead_roots;
7663 int ret;
7664 unsigned long nr;
7665
7666 INIT_LIST_HEAD(&dead_roots);
7667 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7668
7669 while (!list_empty(&dead_roots)) {
7670 reloc_root = list_entry(dead_roots.prev,
7671 struct btrfs_root, dead_list);
7672 list_del_init(&reloc_root->dead_list);
7673
7674 BUG_ON(reloc_root->commit_root != NULL);
7675 while (1) {
7a7eaa40 7676 trans = btrfs_join_transaction(root);
3612b495 7677 BUG_ON(IS_ERR(trans));
1a40e23b
ZY
7678
7679 mutex_lock(&root->fs_info->drop_mutex);
7680 ret = btrfs_drop_snapshot(trans, reloc_root);
7681 if (ret != -EAGAIN)
7682 break;
7683 mutex_unlock(&root->fs_info->drop_mutex);
7684
7685 nr = trans->blocks_used;
7686 ret = btrfs_end_transaction(trans, root);
7687 BUG_ON(ret);
7688 btrfs_btree_balance_dirty(root, nr);
7689 }
7690
7691 free_extent_buffer(reloc_root->node);
7692
7693 ret = btrfs_del_root(trans, root->fs_info->tree_root,
7694 &reloc_root->root_key);
7695 BUG_ON(ret);
7696 mutex_unlock(&root->fs_info->drop_mutex);
7697
7698 nr = trans->blocks_used;
7699 ret = btrfs_end_transaction(trans, root);
7700 BUG_ON(ret);
7701 btrfs_btree_balance_dirty(root, nr);
7702
7703 kfree(prev_root);
7704 prev_root = reloc_root;
7705 }
7706 if (prev_root) {
7707 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7708 kfree(prev_root);
7709 }
7710 return 0;
7711}
7712
7713int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7714{
7715 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7716 return 0;
7717}
7718
7719int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7720{
7721 struct btrfs_root *reloc_root;
7722 struct btrfs_trans_handle *trans;
7723 struct btrfs_key location;
7724 int found;
7725 int ret;
7726
7727 mutex_lock(&root->fs_info->tree_reloc_mutex);
7728 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7729 BUG_ON(ret);
7730 found = !list_empty(&root->fs_info->dead_reloc_roots);
7731 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7732
7733 if (found) {
7734 trans = btrfs_start_transaction(root, 1);
98d5dc13 7735 BUG_ON(IS_ERR(trans));
1a40e23b
ZY
7736 ret = btrfs_commit_transaction(trans, root);
7737 BUG_ON(ret);
7738 }
7739
7740 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7741 location.offset = (u64)-1;
7742 location.type = BTRFS_ROOT_ITEM_KEY;
7743
7744 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7745 BUG_ON(!reloc_root);
66b4ffd1
JB
7746 ret = btrfs_orphan_cleanup(reloc_root);
7747 BUG_ON(ret);
1a40e23b
ZY
7748 return 0;
7749}
7750
d397712b 7751static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7752 struct btrfs_root *root)
7753{
7754 struct btrfs_root *reloc_root;
7755 struct extent_buffer *eb;
7756 struct btrfs_root_item *root_item;
7757 struct btrfs_key root_key;
7758 int ret;
7759
7760 BUG_ON(!root->ref_cows);
7761 if (root->reloc_root)
7762 return 0;
7763
7764 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
db5b493a
TI
7765 if (!root_item)
7766 return -ENOMEM;
1a40e23b
ZY
7767
7768 ret = btrfs_copy_root(trans, root, root->commit_root,
7769 &eb, BTRFS_TREE_RELOC_OBJECTID);
7770 BUG_ON(ret);
7771
7772 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7773 root_key.offset = root->root_key.objectid;
7774 root_key.type = BTRFS_ROOT_ITEM_KEY;
7775
7776 memcpy(root_item, &root->root_item, sizeof(root_item));
7777 btrfs_set_root_refs(root_item, 0);
7778 btrfs_set_root_bytenr(root_item, eb->start);
7779 btrfs_set_root_level(root_item, btrfs_header_level(eb));
84234f3a 7780 btrfs_set_root_generation(root_item, trans->transid);
1a40e23b
ZY
7781
7782 btrfs_tree_unlock(eb);
7783 free_extent_buffer(eb);
7784
7785 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7786 &root_key, root_item);
7787 BUG_ON(ret);
7788 kfree(root_item);
7789
7790 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7791 &root_key);
db5b493a 7792 BUG_ON(IS_ERR(reloc_root));
1a40e23b
ZY
7793 reloc_root->last_trans = trans->transid;
7794 reloc_root->commit_root = NULL;
7795 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7796
7797 root->reloc_root = reloc_root;
7798 return 0;
7799}
7800
7801/*
7802 * Core function of space balance.
7803 *
7804 * The idea is using reloc trees to relocate tree blocks in reference
f82d02d9
YZ
7805 * counted roots. There is one reloc tree for each subvol, and all
7806 * reloc trees share same root key objectid. Reloc trees are snapshots
7807 * of the latest committed roots of subvols (root->commit_root).
7808 *
7809 * To relocate a tree block referenced by a subvol, there are two steps.
7810 * COW the block through subvol's reloc tree, then update block pointer
7811 * in the subvol to point to the new block. Since all reloc trees share
7812 * same root key objectid, doing special handing for tree blocks owned
7813 * by them is easy. Once a tree block has been COWed in one reloc tree,
7814 * we can use the resulting new block directly when the same block is
7815 * required to COW again through other reloc trees. By this way, relocated
7816 * tree blocks are shared between reloc trees, so they are also shared
7817 * between subvols.
1a40e23b 7818 */
d397712b 7819static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7820 struct btrfs_root *root,
7821 struct btrfs_path *path,
7822 struct btrfs_key *first_key,
7823 struct btrfs_ref_path *ref_path,
7824 struct btrfs_block_group_cache *group,
7825 struct inode *reloc_inode)
7826{
7827 struct btrfs_root *reloc_root;
7828 struct extent_buffer *eb = NULL;
7829 struct btrfs_key *keys;
7830 u64 *nodes;
7831 int level;
f82d02d9 7832 int shared_level;
1a40e23b 7833 int lowest_level = 0;
1a40e23b
ZY
7834 int ret;
7835
7836 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7837 lowest_level = ref_path->owner_objectid;
7838
f82d02d9 7839 if (!root->ref_cows) {
1a40e23b
ZY
7840 path->lowest_level = lowest_level;
7841 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7842 BUG_ON(ret < 0);
7843 path->lowest_level = 0;
7844 btrfs_release_path(root, path);
7845 return 0;
7846 }
7847
1a40e23b
ZY
7848 mutex_lock(&root->fs_info->tree_reloc_mutex);
7849 ret = init_reloc_tree(trans, root);
7850 BUG_ON(ret);
7851 reloc_root = root->reloc_root;
7852
f82d02d9
YZ
7853 shared_level = ref_path->shared_level;
7854 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
1a40e23b 7855
f82d02d9
YZ
7856 keys = ref_path->node_keys;
7857 nodes = ref_path->new_nodes;
7858 memset(&keys[shared_level + 1], 0,
7859 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7860 memset(&nodes[shared_level + 1], 0,
7861 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
1a40e23b 7862
f82d02d9
YZ
7863 if (nodes[lowest_level] == 0) {
7864 path->lowest_level = lowest_level;
7865 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7866 0, 1);
7867 BUG_ON(ret);
7868 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7869 eb = path->nodes[level];
7870 if (!eb || eb == reloc_root->node)
7871 break;
7872 nodes[level] = eb->start;
7873 if (level == 0)
7874 btrfs_item_key_to_cpu(eb, &keys[level], 0);
7875 else
7876 btrfs_node_key_to_cpu(eb, &keys[level], 0);
7877 }
2b82032c
YZ
7878 if (nodes[0] &&
7879 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
f82d02d9
YZ
7880 eb = path->nodes[0];
7881 ret = replace_extents_in_leaf(trans, reloc_root, eb,
7882 group, reloc_inode);
7883 BUG_ON(ret);
7884 }
7885 btrfs_release_path(reloc_root, path);
7886 } else {
1a40e23b 7887 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
f82d02d9 7888 lowest_level);
1a40e23b
ZY
7889 BUG_ON(ret);
7890 }
7891
1a40e23b
ZY
7892 /*
7893 * replace tree blocks in the fs tree with tree blocks in
7894 * the reloc tree.
7895 */
7896 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7897 BUG_ON(ret < 0);
7898
7899 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
f82d02d9
YZ
7900 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7901 0, 0);
7902 BUG_ON(ret);
7903 extent_buffer_get(path->nodes[0]);
7904 eb = path->nodes[0];
7905 btrfs_release_path(reloc_root, path);
1a40e23b
ZY
7906 ret = invalidate_extent_cache(reloc_root, eb, group, root);
7907 BUG_ON(ret);
7908 free_extent_buffer(eb);
7909 }
1a40e23b 7910
f82d02d9 7911 mutex_unlock(&root->fs_info->tree_reloc_mutex);
1a40e23b 7912 path->lowest_level = 0;
1a40e23b
ZY
7913 return 0;
7914}
7915
d397712b 7916static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7917 struct btrfs_root *root,
7918 struct btrfs_path *path,
7919 struct btrfs_key *first_key,
7920 struct btrfs_ref_path *ref_path)
7921{
7922 int ret;
1a40e23b
ZY
7923
7924 ret = relocate_one_path(trans, root, path, first_key,
7925 ref_path, NULL, NULL);
7926 BUG_ON(ret);
7927
1a40e23b
ZY
7928 return 0;
7929}
7930
d397712b 7931static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7932 struct btrfs_root *extent_root,
7933 struct btrfs_path *path,
7934 struct btrfs_key *extent_key)
7935{
7936 int ret;
7937
1a40e23b
ZY
7938 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7939 if (ret)
7940 goto out;
7941 ret = btrfs_del_item(trans, extent_root, path);
7942out:
7943 btrfs_release_path(extent_root, path);
1a40e23b
ZY
7944 return ret;
7945}
7946
d397712b 7947static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
1a40e23b
ZY
7948 struct btrfs_ref_path *ref_path)
7949{
7950 struct btrfs_key root_key;
7951
7952 root_key.objectid = ref_path->root_objectid;
7953 root_key.type = BTRFS_ROOT_ITEM_KEY;
7954 if (is_cowonly_root(ref_path->root_objectid))
7955 root_key.offset = 0;
7956 else
7957 root_key.offset = (u64)-1;
7958
7959 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7960}
7961
d397712b 7962static noinline int relocate_one_extent(struct btrfs_root *extent_root,
1a40e23b
ZY
7963 struct btrfs_path *path,
7964 struct btrfs_key *extent_key,
7965 struct btrfs_block_group_cache *group,
7966 struct inode *reloc_inode, int pass)
7967{
7968 struct btrfs_trans_handle *trans;
7969 struct btrfs_root *found_root;
7970 struct btrfs_ref_path *ref_path = NULL;
7971 struct disk_extent *new_extents = NULL;
7972 int nr_extents = 0;
7973 int loops;
7974 int ret;
7975 int level;
7976 struct btrfs_key first_key;
7977 u64 prev_block = 0;
7978
1a40e23b
ZY
7979
7980 trans = btrfs_start_transaction(extent_root, 1);
98d5dc13 7981 BUG_ON(IS_ERR(trans));
1a40e23b
ZY
7982
7983 if (extent_key->objectid == 0) {
7984 ret = del_extent_zero(trans, extent_root, path, extent_key);
7985 goto out;
7986 }
7987
7988 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7989 if (!ref_path) {
d397712b
CM
7990 ret = -ENOMEM;
7991 goto out;
1a40e23b
ZY
7992 }
7993
7994 for (loops = 0; ; loops++) {
7995 if (loops == 0) {
7996 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7997 extent_key->objectid);
7998 } else {
7999 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
8000 }
8001 if (ret < 0)
8002 goto out;
8003 if (ret > 0)
8004 break;
8005
8006 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
8007 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
8008 continue;
8009
8010 found_root = read_ref_root(extent_root->fs_info, ref_path);
8011 BUG_ON(!found_root);
8012 /*
8013 * for reference counted tree, only process reference paths
8014 * rooted at the latest committed root.
8015 */
8016 if (found_root->ref_cows &&
8017 ref_path->root_generation != found_root->root_key.offset)
8018 continue;
8019
8020 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
8021 if (pass == 0) {
8022 /*
8023 * copy data extents to new locations
8024 */
8025 u64 group_start = group->key.objectid;
8026 ret = relocate_data_extent(reloc_inode,
8027 extent_key,
8028 group_start);
8029 if (ret < 0)
8030 goto out;
8031 break;
8032 }
8033 level = 0;
8034 } else {
8035 level = ref_path->owner_objectid;
8036 }
8037
8038 if (prev_block != ref_path->nodes[level]) {
8039 struct extent_buffer *eb;
8040 u64 block_start = ref_path->nodes[level];
8041 u64 block_size = btrfs_level_size(found_root, level);
8042
8043 eb = read_tree_block(found_root, block_start,
8044 block_size, 0);
97d9a8a4
TI
8045 if (!eb) {
8046 ret = -EIO;
8047 goto out;
8048 }
1a40e23b
ZY
8049 btrfs_tree_lock(eb);
8050 BUG_ON(level != btrfs_header_level(eb));
8051
8052 if (level == 0)
8053 btrfs_item_key_to_cpu(eb, &first_key, 0);
8054 else
8055 btrfs_node_key_to_cpu(eb, &first_key, 0);
8056
8057 btrfs_tree_unlock(eb);
8058 free_extent_buffer(eb);
8059 prev_block = block_start;
8060 }
8061
24562425 8062 mutex_lock(&extent_root->fs_info->trans_mutex);
e4404d6e 8063 btrfs_record_root_in_trans(found_root);
24562425 8064 mutex_unlock(&extent_root->fs_info->trans_mutex);
e4404d6e
YZ
8065 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
8066 /*
8067 * try to update data extent references while
8068 * keeping metadata shared between snapshots.
8069 */
8070 if (pass == 1) {
8071 ret = relocate_one_path(trans, found_root,
8072 path, &first_key, ref_path,
8073 group, reloc_inode);
8074 if (ret < 0)
8075 goto out;
8076 continue;
8077 }
1a40e23b
ZY
8078 /*
8079 * use fallback method to process the remaining
8080 * references.
8081 */
8082 if (!new_extents) {
8083 u64 group_start = group->key.objectid;
d899e052
YZ
8084 new_extents = kmalloc(sizeof(*new_extents),
8085 GFP_NOFS);
8d413713
TI
8086 if (!new_extents) {
8087 ret = -ENOMEM;
8088 goto out;
8089 }
d899e052 8090 nr_extents = 1;
1a40e23b
ZY
8091 ret = get_new_locations(reloc_inode,
8092 extent_key,
d899e052 8093 group_start, 1,
1a40e23b
ZY
8094 &new_extents,
8095 &nr_extents);
d899e052 8096 if (ret)
1a40e23b
ZY
8097 goto out;
8098 }
1a40e23b
ZY
8099 ret = replace_one_extent(trans, found_root,
8100 path, extent_key,
8101 &first_key, ref_path,
8102 new_extents, nr_extents);
e4404d6e 8103 } else {
1a40e23b
ZY
8104 ret = relocate_tree_block(trans, found_root, path,
8105 &first_key, ref_path);
1a40e23b
ZY
8106 }
8107 if (ret < 0)
8108 goto out;
8109 }
8110 ret = 0;
8111out:
8112 btrfs_end_transaction(trans, extent_root);
8113 kfree(new_extents);
8114 kfree(ref_path);
1a40e23b
ZY
8115 return ret;
8116}
5d4f98a2 8117#endif
1a40e23b 8118
ec44a35c
CM
8119static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8120{
8121 u64 num_devices;
8122 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
8123 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8124
cd02dca5
CM
8125 /*
8126 * we add in the count of missing devices because we want
8127 * to make sure that any RAID levels on a degraded FS
8128 * continue to be honored.
8129 */
8130 num_devices = root->fs_info->fs_devices->rw_devices +
8131 root->fs_info->fs_devices->missing_devices;
8132
ec44a35c
CM
8133 if (num_devices == 1) {
8134 stripped |= BTRFS_BLOCK_GROUP_DUP;
8135 stripped = flags & ~stripped;
8136
8137 /* turn raid0 into single device chunks */
8138 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8139 return stripped;
8140
8141 /* turn mirroring into duplication */
8142 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8143 BTRFS_BLOCK_GROUP_RAID10))
8144 return stripped | BTRFS_BLOCK_GROUP_DUP;
8145 return flags;
8146 } else {
8147 /* they already had raid on here, just return */
ec44a35c
CM
8148 if (flags & stripped)
8149 return flags;
8150
8151 stripped |= BTRFS_BLOCK_GROUP_DUP;
8152 stripped = flags & ~stripped;
8153
8154 /* switch duplicated blocks with raid1 */
8155 if (flags & BTRFS_BLOCK_GROUP_DUP)
8156 return stripped | BTRFS_BLOCK_GROUP_RAID1;
8157
8158 /* turn single device chunks into raid0 */
8159 return stripped | BTRFS_BLOCK_GROUP_RAID0;
8160 }
8161 return flags;
8162}
8163
f0486c68 8164static int set_block_group_ro(struct btrfs_block_group_cache *cache)
0ef3e66b 8165{
f0486c68
YZ
8166 struct btrfs_space_info *sinfo = cache->space_info;
8167 u64 num_bytes;
8168 int ret = -ENOSPC;
0ef3e66b 8169
f0486c68
YZ
8170 if (cache->ro)
8171 return 0;
c286ac48 8172
f0486c68
YZ
8173 spin_lock(&sinfo->lock);
8174 spin_lock(&cache->lock);
8175 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8176 cache->bytes_super - btrfs_block_group_used(&cache->item);
8177
8178 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8179 sinfo->bytes_may_use + sinfo->bytes_readonly +
65e5341b 8180 cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
f0486c68
YZ
8181 sinfo->bytes_readonly += num_bytes;
8182 sinfo->bytes_reserved += cache->reserved_pinned;
8183 cache->reserved_pinned = 0;
8184 cache->ro = 1;
8185 ret = 0;
8186 }
65e5341b 8187
f0486c68
YZ
8188 spin_unlock(&cache->lock);
8189 spin_unlock(&sinfo->lock);
8190 return ret;
8191}
7d9eb12c 8192
f0486c68
YZ
8193int btrfs_set_block_group_ro(struct btrfs_root *root,
8194 struct btrfs_block_group_cache *cache)
c286ac48 8195
f0486c68
YZ
8196{
8197 struct btrfs_trans_handle *trans;
8198 u64 alloc_flags;
8199 int ret;
7d9eb12c 8200
f0486c68 8201 BUG_ON(cache->ro);
0ef3e66b 8202
7a7eaa40 8203 trans = btrfs_join_transaction(root);
f0486c68 8204 BUG_ON(IS_ERR(trans));
5d4f98a2 8205
f0486c68
YZ
8206 alloc_flags = update_block_group_flags(root, cache->flags);
8207 if (alloc_flags != cache->flags)
0e4f8f88
CM
8208 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8209 CHUNK_ALLOC_FORCE);
5d4f98a2 8210
f0486c68
YZ
8211 ret = set_block_group_ro(cache);
8212 if (!ret)
8213 goto out;
8214 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
0e4f8f88
CM
8215 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8216 CHUNK_ALLOC_FORCE);
f0486c68
YZ
8217 if (ret < 0)
8218 goto out;
8219 ret = set_block_group_ro(cache);
8220out:
8221 btrfs_end_transaction(trans, root);
8222 return ret;
8223}
5d4f98a2 8224
c87f08ca
CM
8225int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8226 struct btrfs_root *root, u64 type)
8227{
8228 u64 alloc_flags = get_alloc_profile(root, type);
0e4f8f88
CM
8229 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8230 CHUNK_ALLOC_FORCE);
c87f08ca
CM
8231}
8232
6d07bcec
MX
8233/*
8234 * helper to account the unused space of all the readonly block group in the
8235 * list. takes mirrors into account.
8236 */
8237static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8238{
8239 struct btrfs_block_group_cache *block_group;
8240 u64 free_bytes = 0;
8241 int factor;
8242
8243 list_for_each_entry(block_group, groups_list, list) {
8244 spin_lock(&block_group->lock);
8245
8246 if (!block_group->ro) {
8247 spin_unlock(&block_group->lock);
8248 continue;
8249 }
8250
8251 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8252 BTRFS_BLOCK_GROUP_RAID10 |
8253 BTRFS_BLOCK_GROUP_DUP))
8254 factor = 2;
8255 else
8256 factor = 1;
8257
8258 free_bytes += (block_group->key.offset -
8259 btrfs_block_group_used(&block_group->item)) *
8260 factor;
8261
8262 spin_unlock(&block_group->lock);
8263 }
8264
8265 return free_bytes;
8266}
8267
8268/*
8269 * helper to account the unused space of all the readonly block group in the
8270 * space_info. takes mirrors into account.
8271 */
8272u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8273{
8274 int i;
8275 u64 free_bytes = 0;
8276
8277 spin_lock(&sinfo->lock);
8278
8279 for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8280 if (!list_empty(&sinfo->block_groups[i]))
8281 free_bytes += __btrfs_get_ro_block_group_free_space(
8282 &sinfo->block_groups[i]);
8283
8284 spin_unlock(&sinfo->lock);
8285
8286 return free_bytes;
8287}
8288
f0486c68
YZ
8289int btrfs_set_block_group_rw(struct btrfs_root *root,
8290 struct btrfs_block_group_cache *cache)
5d4f98a2 8291{
f0486c68
YZ
8292 struct btrfs_space_info *sinfo = cache->space_info;
8293 u64 num_bytes;
8294
8295 BUG_ON(!cache->ro);
8296
8297 spin_lock(&sinfo->lock);
8298 spin_lock(&cache->lock);
8299 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8300 cache->bytes_super - btrfs_block_group_used(&cache->item);
8301 sinfo->bytes_readonly -= num_bytes;
8302 cache->ro = 0;
8303 spin_unlock(&cache->lock);
8304 spin_unlock(&sinfo->lock);
5d4f98a2
YZ
8305 return 0;
8306}
8307
ba1bf481
JB
8308/*
8309 * checks to see if its even possible to relocate this block group.
8310 *
8311 * @return - -1 if it's not a good idea to relocate this block group, 0 if its
8312 * ok to go ahead and try.
8313 */
8314int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
1a40e23b 8315{
ba1bf481
JB
8316 struct btrfs_block_group_cache *block_group;
8317 struct btrfs_space_info *space_info;
8318 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8319 struct btrfs_device *device;
8320 int full = 0;
8321 int ret = 0;
1a40e23b 8322
ba1bf481 8323 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1a40e23b 8324
ba1bf481
JB
8325 /* odd, couldn't find the block group, leave it alone */
8326 if (!block_group)
8327 return -1;
1a40e23b 8328
ba1bf481
JB
8329 /* no bytes used, we're good */
8330 if (!btrfs_block_group_used(&block_group->item))
1a40e23b
ZY
8331 goto out;
8332
ba1bf481
JB
8333 space_info = block_group->space_info;
8334 spin_lock(&space_info->lock);
17d217fe 8335
ba1bf481 8336 full = space_info->full;
17d217fe 8337
ba1bf481
JB
8338 /*
8339 * if this is the last block group we have in this space, we can't
7ce618db
CM
8340 * relocate it unless we're able to allocate a new chunk below.
8341 *
8342 * Otherwise, we need to make sure we have room in the space to handle
8343 * all of the extents from this block group. If we can, we're good
ba1bf481 8344 */
7ce618db
CM
8345 if ((space_info->total_bytes != block_group->key.offset) &&
8346 (space_info->bytes_used + space_info->bytes_reserved +
ba1bf481
JB
8347 space_info->bytes_pinned + space_info->bytes_readonly +
8348 btrfs_block_group_used(&block_group->item) <
7ce618db 8349 space_info->total_bytes)) {
ba1bf481
JB
8350 spin_unlock(&space_info->lock);
8351 goto out;
17d217fe 8352 }
ba1bf481 8353 spin_unlock(&space_info->lock);
ea8c2819 8354
ba1bf481
JB
8355 /*
8356 * ok we don't have enough space, but maybe we have free space on our
8357 * devices to allocate new chunks for relocation, so loop through our
8358 * alloc devices and guess if we have enough space. However, if we
8359 * were marked as full, then we know there aren't enough chunks, and we
8360 * can just return.
8361 */
8362 ret = -1;
8363 if (full)
8364 goto out;
ea8c2819 8365
ba1bf481
JB
8366 mutex_lock(&root->fs_info->chunk_mutex);
8367 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8368 u64 min_free = btrfs_block_group_used(&block_group->item);
7bfc837d 8369 u64 dev_offset;
56bec294 8370
ba1bf481
JB
8371 /*
8372 * check to make sure we can actually find a chunk with enough
8373 * space to fit our block group in.
8374 */
8375 if (device->total_bytes > device->bytes_used + min_free) {
8376 ret = find_free_dev_extent(NULL, device, min_free,
7bfc837d 8377 &dev_offset, NULL);
ba1bf481 8378 if (!ret)
73e48b27 8379 break;
ba1bf481 8380 ret = -1;
725c8463 8381 }
edbd8d4e 8382 }
ba1bf481 8383 mutex_unlock(&root->fs_info->chunk_mutex);
edbd8d4e 8384out:
ba1bf481 8385 btrfs_put_block_group(block_group);
edbd8d4e
CM
8386 return ret;
8387}
8388
b2950863
CH
8389static int find_first_block_group(struct btrfs_root *root,
8390 struct btrfs_path *path, struct btrfs_key *key)
0b86a832 8391{
925baedd 8392 int ret = 0;
0b86a832
CM
8393 struct btrfs_key found_key;
8394 struct extent_buffer *leaf;
8395 int slot;
edbd8d4e 8396
0b86a832
CM
8397 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8398 if (ret < 0)
925baedd
CM
8399 goto out;
8400
d397712b 8401 while (1) {
0b86a832 8402 slot = path->slots[0];
edbd8d4e 8403 leaf = path->nodes[0];
0b86a832
CM
8404 if (slot >= btrfs_header_nritems(leaf)) {
8405 ret = btrfs_next_leaf(root, path);
8406 if (ret == 0)
8407 continue;
8408 if (ret < 0)
925baedd 8409 goto out;
0b86a832 8410 break;
edbd8d4e 8411 }
0b86a832 8412 btrfs_item_key_to_cpu(leaf, &found_key, slot);
edbd8d4e 8413
0b86a832 8414 if (found_key.objectid >= key->objectid &&
925baedd
CM
8415 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8416 ret = 0;
8417 goto out;
8418 }
0b86a832 8419 path->slots[0]++;
edbd8d4e 8420 }
925baedd 8421out:
0b86a832 8422 return ret;
edbd8d4e
CM
8423}
8424
0af3d00b
JB
8425void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8426{
8427 struct btrfs_block_group_cache *block_group;
8428 u64 last = 0;
8429
8430 while (1) {
8431 struct inode *inode;
8432
8433 block_group = btrfs_lookup_first_block_group(info, last);
8434 while (block_group) {
8435 spin_lock(&block_group->lock);
8436 if (block_group->iref)
8437 break;
8438 spin_unlock(&block_group->lock);
8439 block_group = next_block_group(info->tree_root,
8440 block_group);
8441 }
8442 if (!block_group) {
8443 if (last == 0)
8444 break;
8445 last = 0;
8446 continue;
8447 }
8448
8449 inode = block_group->inode;
8450 block_group->iref = 0;
8451 block_group->inode = NULL;
8452 spin_unlock(&block_group->lock);
8453 iput(inode);
8454 last = block_group->key.objectid + block_group->key.offset;
8455 btrfs_put_block_group(block_group);
8456 }
8457}
8458
1a40e23b
ZY
8459int btrfs_free_block_groups(struct btrfs_fs_info *info)
8460{
8461 struct btrfs_block_group_cache *block_group;
4184ea7f 8462 struct btrfs_space_info *space_info;
11833d66 8463 struct btrfs_caching_control *caching_ctl;
1a40e23b
ZY
8464 struct rb_node *n;
8465
11833d66
YZ
8466 down_write(&info->extent_commit_sem);
8467 while (!list_empty(&info->caching_block_groups)) {
8468 caching_ctl = list_entry(info->caching_block_groups.next,
8469 struct btrfs_caching_control, list);
8470 list_del(&caching_ctl->list);
8471 put_caching_control(caching_ctl);
8472 }
8473 up_write(&info->extent_commit_sem);
8474
1a40e23b
ZY
8475 spin_lock(&info->block_group_cache_lock);
8476 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8477 block_group = rb_entry(n, struct btrfs_block_group_cache,
8478 cache_node);
1a40e23b
ZY
8479 rb_erase(&block_group->cache_node,
8480 &info->block_group_cache_tree);
d899e052
YZ
8481 spin_unlock(&info->block_group_cache_lock);
8482
80eb234a 8483 down_write(&block_group->space_info->groups_sem);
1a40e23b 8484 list_del(&block_group->list);
80eb234a 8485 up_write(&block_group->space_info->groups_sem);
d2fb3437 8486
817d52f8 8487 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 8488 wait_block_group_cache_done(block_group);
817d52f8 8489
3c14874a
JB
8490 /*
8491 * We haven't cached this block group, which means we could
8492 * possibly have excluded extents on this block group.
8493 */
8494 if (block_group->cached == BTRFS_CACHE_NO)
8495 free_excluded_extents(info->extent_root, block_group);
8496
817d52f8 8497 btrfs_remove_free_space_cache(block_group);
11dfe35a 8498 btrfs_put_block_group(block_group);
d899e052
YZ
8499
8500 spin_lock(&info->block_group_cache_lock);
1a40e23b
ZY
8501 }
8502 spin_unlock(&info->block_group_cache_lock);
4184ea7f
CM
8503
8504 /* now that all the block groups are freed, go through and
8505 * free all the space_info structs. This is only called during
8506 * the final stages of unmount, and so we know nobody is
8507 * using them. We call synchronize_rcu() once before we start,
8508 * just to be on the safe side.
8509 */
8510 synchronize_rcu();
8511
8929ecfa
YZ
8512 release_global_block_rsv(info);
8513
4184ea7f
CM
8514 while(!list_empty(&info->space_info)) {
8515 space_info = list_entry(info->space_info.next,
8516 struct btrfs_space_info,
8517 list);
f0486c68
YZ
8518 if (space_info->bytes_pinned > 0 ||
8519 space_info->bytes_reserved > 0) {
8520 WARN_ON(1);
8521 dump_space_info(space_info, 0, 0);
8522 }
4184ea7f
CM
8523 list_del(&space_info->list);
8524 kfree(space_info);
8525 }
1a40e23b
ZY
8526 return 0;
8527}
8528
b742bb82
YZ
8529static void __link_block_group(struct btrfs_space_info *space_info,
8530 struct btrfs_block_group_cache *cache)
8531{
8532 int index = get_block_group_index(cache);
8533
8534 down_write(&space_info->groups_sem);
8535 list_add_tail(&cache->list, &space_info->block_groups[index]);
8536 up_write(&space_info->groups_sem);
8537}
8538
9078a3e1
CM
8539int btrfs_read_block_groups(struct btrfs_root *root)
8540{
8541 struct btrfs_path *path;
8542 int ret;
9078a3e1 8543 struct btrfs_block_group_cache *cache;
be744175 8544 struct btrfs_fs_info *info = root->fs_info;
6324fbf3 8545 struct btrfs_space_info *space_info;
9078a3e1
CM
8546 struct btrfs_key key;
8547 struct btrfs_key found_key;
5f39d397 8548 struct extent_buffer *leaf;
0af3d00b
JB
8549 int need_clear = 0;
8550 u64 cache_gen;
96b5179d 8551
be744175 8552 root = info->extent_root;
9078a3e1 8553 key.objectid = 0;
0b86a832 8554 key.offset = 0;
9078a3e1 8555 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
9078a3e1
CM
8556 path = btrfs_alloc_path();
8557 if (!path)
8558 return -ENOMEM;
8559
0af3d00b
JB
8560 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
8561 if (cache_gen != 0 &&
8562 btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
8563 need_clear = 1;
88c2ba3b
JB
8564 if (btrfs_test_opt(root, CLEAR_CACHE))
8565 need_clear = 1;
8216ef86
JB
8566 if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
8567 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
0af3d00b 8568
d397712b 8569 while (1) {
0b86a832 8570 ret = find_first_block_group(root, path, &key);
b742bb82
YZ
8571 if (ret > 0)
8572 break;
0b86a832
CM
8573 if (ret != 0)
8574 goto error;
5f39d397
CM
8575 leaf = path->nodes[0];
8576 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8f18cf13 8577 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9078a3e1 8578 if (!cache) {
0b86a832 8579 ret = -ENOMEM;
f0486c68 8580 goto error;
9078a3e1 8581 }
3e1ad54f 8582
d2fb3437 8583 atomic_set(&cache->count, 1);
c286ac48 8584 spin_lock_init(&cache->lock);
6226cb0a 8585 spin_lock_init(&cache->tree_lock);
817d52f8 8586 cache->fs_info = info;
0f9dd46c 8587 INIT_LIST_HEAD(&cache->list);
fa9c0d79 8588 INIT_LIST_HEAD(&cache->cluster_list);
96303081 8589
0af3d00b
JB
8590 if (need_clear)
8591 cache->disk_cache_state = BTRFS_DC_CLEAR;
8592
96303081
JB
8593 /*
8594 * we only want to have 32k of ram per block group for keeping
8595 * track of free space, and if we pass 1/2 of that we want to
8596 * start converting things over to using bitmaps
8597 */
8598 cache->extents_thresh = ((1024 * 32) / 2) /
8599 sizeof(struct btrfs_free_space);
8600
5f39d397
CM
8601 read_extent_buffer(leaf, &cache->item,
8602 btrfs_item_ptr_offset(leaf, path->slots[0]),
8603 sizeof(cache->item));
9078a3e1 8604 memcpy(&cache->key, &found_key, sizeof(found_key));
0b86a832 8605
9078a3e1
CM
8606 key.objectid = found_key.objectid + found_key.offset;
8607 btrfs_release_path(root, path);
0b86a832 8608 cache->flags = btrfs_block_group_flags(&cache->item);
817d52f8
JB
8609 cache->sectorsize = root->sectorsize;
8610
3c14874a
JB
8611 /*
8612 * We need to exclude the super stripes now so that the space
8613 * info has super bytes accounted for, otherwise we'll think
8614 * we have more space than we actually do.
8615 */
8616 exclude_super_stripes(root, cache);
8617
817d52f8
JB
8618 /*
8619 * check for two cases, either we are full, and therefore
8620 * don't need to bother with the caching work since we won't
8621 * find any space, or we are empty, and we can just add all
8622 * the space in and be done with it. This saves us _alot_ of
8623 * time, particularly in the full case.
8624 */
8625 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
11833d66 8626 cache->last_byte_to_unpin = (u64)-1;
817d52f8 8627 cache->cached = BTRFS_CACHE_FINISHED;
1b2da372 8628 free_excluded_extents(root, cache);
817d52f8 8629 } else if (btrfs_block_group_used(&cache->item) == 0) {
11833d66 8630 cache->last_byte_to_unpin = (u64)-1;
817d52f8
JB
8631 cache->cached = BTRFS_CACHE_FINISHED;
8632 add_new_free_space(cache, root->fs_info,
8633 found_key.objectid,
8634 found_key.objectid +
8635 found_key.offset);
11833d66 8636 free_excluded_extents(root, cache);
817d52f8 8637 }
96b5179d 8638
6324fbf3
CM
8639 ret = update_space_info(info, cache->flags, found_key.offset,
8640 btrfs_block_group_used(&cache->item),
8641 &space_info);
8642 BUG_ON(ret);
8643 cache->space_info = space_info;
1b2da372 8644 spin_lock(&cache->space_info->lock);
f0486c68 8645 cache->space_info->bytes_readonly += cache->bytes_super;
1b2da372
JB
8646 spin_unlock(&cache->space_info->lock);
8647
b742bb82 8648 __link_block_group(space_info, cache);
0f9dd46c
JB
8649
8650 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8651 BUG_ON(ret);
75ccf47d
CM
8652
8653 set_avail_alloc_bits(root->fs_info, cache->flags);
2b82032c 8654 if (btrfs_chunk_readonly(root, cache->key.objectid))
f0486c68 8655 set_block_group_ro(cache);
9078a3e1 8656 }
b742bb82
YZ
8657
8658 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8659 if (!(get_alloc_profile(root, space_info->flags) &
8660 (BTRFS_BLOCK_GROUP_RAID10 |
8661 BTRFS_BLOCK_GROUP_RAID1 |
8662 BTRFS_BLOCK_GROUP_DUP)))
8663 continue;
8664 /*
8665 * avoid allocating from un-mirrored block group if there are
8666 * mirrored block groups.
8667 */
8668 list_for_each_entry(cache, &space_info->block_groups[3], list)
f0486c68 8669 set_block_group_ro(cache);
b742bb82 8670 list_for_each_entry(cache, &space_info->block_groups[4], list)
f0486c68 8671 set_block_group_ro(cache);
9078a3e1 8672 }
f0486c68
YZ
8673
8674 init_global_block_rsv(info);
0b86a832
CM
8675 ret = 0;
8676error:
9078a3e1 8677 btrfs_free_path(path);
0b86a832 8678 return ret;
9078a3e1 8679}
6324fbf3
CM
8680
8681int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8682 struct btrfs_root *root, u64 bytes_used,
e17cade2 8683 u64 type, u64 chunk_objectid, u64 chunk_offset,
6324fbf3
CM
8684 u64 size)
8685{
8686 int ret;
6324fbf3
CM
8687 struct btrfs_root *extent_root;
8688 struct btrfs_block_group_cache *cache;
6324fbf3
CM
8689
8690 extent_root = root->fs_info->extent_root;
6324fbf3 8691
12fcfd22 8692 root->fs_info->last_trans_log_full_commit = trans->transid;
e02119d5 8693
8f18cf13 8694 cache = kzalloc(sizeof(*cache), GFP_NOFS);
0f9dd46c
JB
8695 if (!cache)
8696 return -ENOMEM;
8697
e17cade2 8698 cache->key.objectid = chunk_offset;
6324fbf3 8699 cache->key.offset = size;
d2fb3437 8700 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
96303081 8701 cache->sectorsize = root->sectorsize;
0af3d00b 8702 cache->fs_info = root->fs_info;
96303081
JB
8703
8704 /*
8705 * we only want to have 32k of ram per block group for keeping track
8706 * of free space, and if we pass 1/2 of that we want to start
8707 * converting things over to using bitmaps
8708 */
8709 cache->extents_thresh = ((1024 * 32) / 2) /
8710 sizeof(struct btrfs_free_space);
d2fb3437 8711 atomic_set(&cache->count, 1);
c286ac48 8712 spin_lock_init(&cache->lock);
6226cb0a 8713 spin_lock_init(&cache->tree_lock);
0f9dd46c 8714 INIT_LIST_HEAD(&cache->list);
fa9c0d79 8715 INIT_LIST_HEAD(&cache->cluster_list);
0ef3e66b 8716
6324fbf3 8717 btrfs_set_block_group_used(&cache->item, bytes_used);
6324fbf3
CM
8718 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8719 cache->flags = type;
8720 btrfs_set_block_group_flags(&cache->item, type);
8721
11833d66 8722 cache->last_byte_to_unpin = (u64)-1;
817d52f8 8723 cache->cached = BTRFS_CACHE_FINISHED;
11833d66 8724 exclude_super_stripes(root, cache);
96303081 8725
817d52f8
JB
8726 add_new_free_space(cache, root->fs_info, chunk_offset,
8727 chunk_offset + size);
8728
11833d66
YZ
8729 free_excluded_extents(root, cache);
8730
6324fbf3
CM
8731 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8732 &cache->space_info);
8733 BUG_ON(ret);
1b2da372
JB
8734
8735 spin_lock(&cache->space_info->lock);
f0486c68 8736 cache->space_info->bytes_readonly += cache->bytes_super;
1b2da372
JB
8737 spin_unlock(&cache->space_info->lock);
8738
b742bb82 8739 __link_block_group(cache->space_info, cache);
6324fbf3 8740
0f9dd46c
JB
8741 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8742 BUG_ON(ret);
c286ac48 8743
6324fbf3
CM
8744 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
8745 sizeof(cache->item));
8746 BUG_ON(ret);
8747
d18a2c44 8748 set_avail_alloc_bits(extent_root->fs_info, type);
925baedd 8749
6324fbf3
CM
8750 return 0;
8751}
1a40e23b
ZY
8752
8753int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8754 struct btrfs_root *root, u64 group_start)
8755{
8756 struct btrfs_path *path;
8757 struct btrfs_block_group_cache *block_group;
44fb5511 8758 struct btrfs_free_cluster *cluster;
0af3d00b 8759 struct btrfs_root *tree_root = root->fs_info->tree_root;
1a40e23b 8760 struct btrfs_key key;
0af3d00b 8761 struct inode *inode;
1a40e23b 8762 int ret;
89a55897 8763 int factor;
1a40e23b 8764
1a40e23b
ZY
8765 root = root->fs_info->extent_root;
8766
8767 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8768 BUG_ON(!block_group);
c146afad 8769 BUG_ON(!block_group->ro);
1a40e23b 8770
9f7c43c9 8771 /*
8772 * Free the reserved super bytes from this block group before
8773 * remove it.
8774 */
8775 free_excluded_extents(root, block_group);
8776
1a40e23b 8777 memcpy(&key, &block_group->key, sizeof(key));
89a55897
JB
8778 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8779 BTRFS_BLOCK_GROUP_RAID1 |
8780 BTRFS_BLOCK_GROUP_RAID10))
8781 factor = 2;
8782 else
8783 factor = 1;
1a40e23b 8784
44fb5511
CM
8785 /* make sure this block group isn't part of an allocation cluster */
8786 cluster = &root->fs_info->data_alloc_cluster;
8787 spin_lock(&cluster->refill_lock);
8788 btrfs_return_cluster_to_free_space(block_group, cluster);
8789 spin_unlock(&cluster->refill_lock);
8790
8791 /*
8792 * make sure this block group isn't part of a metadata
8793 * allocation cluster
8794 */
8795 cluster = &root->fs_info->meta_alloc_cluster;
8796 spin_lock(&cluster->refill_lock);
8797 btrfs_return_cluster_to_free_space(block_group, cluster);
8798 spin_unlock(&cluster->refill_lock);
8799
1a40e23b
ZY
8800 path = btrfs_alloc_path();
8801 BUG_ON(!path);
8802
0af3d00b
JB
8803 inode = lookup_free_space_inode(root, block_group, path);
8804 if (!IS_ERR(inode)) {
8805 btrfs_orphan_add(trans, inode);
8806 clear_nlink(inode);
8807 /* One for the block groups ref */
8808 spin_lock(&block_group->lock);
8809 if (block_group->iref) {
8810 block_group->iref = 0;
8811 block_group->inode = NULL;
8812 spin_unlock(&block_group->lock);
8813 iput(inode);
8814 } else {
8815 spin_unlock(&block_group->lock);
8816 }
8817 /* One for our lookup ref */
8818 iput(inode);
8819 }
8820
8821 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8822 key.offset = block_group->key.objectid;
8823 key.type = 0;
8824
8825 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8826 if (ret < 0)
8827 goto out;
8828 if (ret > 0)
8829 btrfs_release_path(tree_root, path);
8830 if (ret == 0) {
8831 ret = btrfs_del_item(trans, tree_root, path);
8832 if (ret)
8833 goto out;
8834 btrfs_release_path(tree_root, path);
8835 }
8836
3dfdb934 8837 spin_lock(&root->fs_info->block_group_cache_lock);
1a40e23b
ZY
8838 rb_erase(&block_group->cache_node,
8839 &root->fs_info->block_group_cache_tree);
3dfdb934 8840 spin_unlock(&root->fs_info->block_group_cache_lock);
817d52f8 8841
80eb234a 8842 down_write(&block_group->space_info->groups_sem);
44fb5511
CM
8843 /*
8844 * we must use list_del_init so people can check to see if they
8845 * are still on the list after taking the semaphore
8846 */
8847 list_del_init(&block_group->list);
80eb234a 8848 up_write(&block_group->space_info->groups_sem);
1a40e23b 8849
817d52f8 8850 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 8851 wait_block_group_cache_done(block_group);
817d52f8
JB
8852
8853 btrfs_remove_free_space_cache(block_group);
8854
c146afad
YZ
8855 spin_lock(&block_group->space_info->lock);
8856 block_group->space_info->total_bytes -= block_group->key.offset;
8857 block_group->space_info->bytes_readonly -= block_group->key.offset;
89a55897 8858 block_group->space_info->disk_total -= block_group->key.offset * factor;
c146afad 8859 spin_unlock(&block_group->space_info->lock);
283bb197 8860
0af3d00b
JB
8861 memcpy(&key, &block_group->key, sizeof(key));
8862
283bb197 8863 btrfs_clear_space_info_full(root->fs_info);
c146afad 8864
fa9c0d79
CM
8865 btrfs_put_block_group(block_group);
8866 btrfs_put_block_group(block_group);
1a40e23b
ZY
8867
8868 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8869 if (ret > 0)
8870 ret = -EIO;
8871 if (ret < 0)
8872 goto out;
8873
8874 ret = btrfs_del_item(trans, root, path);
8875out:
8876 btrfs_free_path(path);
8877 return ret;
8878}
acce952b 8879
c59021f8 8880int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8881{
8882 struct btrfs_space_info *space_info;
1aba86d6 8883 struct btrfs_super_block *disk_super;
8884 u64 features;
8885 u64 flags;
8886 int mixed = 0;
c59021f8 8887 int ret;
8888
1aba86d6 8889 disk_super = &fs_info->super_copy;
8890 if (!btrfs_super_root(disk_super))
8891 return 1;
c59021f8 8892
1aba86d6 8893 features = btrfs_super_incompat_flags(disk_super);
8894 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8895 mixed = 1;
c59021f8 8896
1aba86d6 8897 flags = BTRFS_BLOCK_GROUP_SYSTEM;
8898 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
c59021f8 8899 if (ret)
1aba86d6 8900 goto out;
c59021f8 8901
1aba86d6 8902 if (mixed) {
8903 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8904 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8905 } else {
8906 flags = BTRFS_BLOCK_GROUP_METADATA;
8907 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8908 if (ret)
8909 goto out;
8910
8911 flags = BTRFS_BLOCK_GROUP_DATA;
8912 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8913 }
8914out:
c59021f8 8915 return ret;
8916}
8917
acce952b 8918int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8919{
8920 return unpin_extent_range(root, start, end);
8921}
8922
8923int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
5378e607 8924 u64 num_bytes, u64 *actual_bytes)
acce952b 8925{
5378e607 8926 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
acce952b 8927}
f7039b1d
LD
8928
8929int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8930{
8931 struct btrfs_fs_info *fs_info = root->fs_info;
8932 struct btrfs_block_group_cache *cache = NULL;
8933 u64 group_trimmed;
8934 u64 start;
8935 u64 end;
8936 u64 trimmed = 0;
8937 int ret = 0;
8938
8939 cache = btrfs_lookup_block_group(fs_info, range->start);
8940
8941 while (cache) {
8942 if (cache->key.objectid >= (range->start + range->len)) {
8943 btrfs_put_block_group(cache);
8944 break;
8945 }
8946
8947 start = max(range->start, cache->key.objectid);
8948 end = min(range->start + range->len,
8949 cache->key.objectid + cache->key.offset);
8950
8951 if (end - start >= range->minlen) {
8952 if (!block_group_cache_done(cache)) {
8953 ret = cache_block_group(cache, NULL, root, 0);
8954 if (!ret)
8955 wait_block_group_cache_done(cache);
8956 }
8957 ret = btrfs_trim_block_group(cache,
8958 &group_trimmed,
8959 start,
8960 end,
8961 range->minlen);
8962
8963 trimmed += group_trimmed;
8964 if (ret) {
8965 btrfs_put_block_group(cache);
8966 break;
8967 }
8968 }
8969
8970 cache = next_block_group(fs_info->tree_root, cache);
8971 }
8972
8973 range->len = trimmed;
8974 return ret;
8975}