2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
26 #include "print-tree.h"
27 #include "transaction.h"
30 #include "ref-cache.h"
32 #define PENDING_EXTENT_INSERT 0
33 #define PENDING_EXTENT_DELETE 1
34 #define PENDING_BACKREF_UPDATE 2
36 struct pending_extent_op {
47 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
48 btrfs_root *extent_root, int all);
49 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
50 btrfs_root *extent_root, int all);
51 static struct btrfs_block_group_cache *
52 __btrfs_find_block_group(struct btrfs_root *root,
53 struct btrfs_block_group_cache *hint,
54 u64 search_start, int data, int owner);
56 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
58 return (cache->flags & bits) == bits;
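/*
 * Illustrative note (editor's sketch): block_group_bits() checks that every
 * bit requested in bits is present in the block group's flags.  For example,
 * a block group flagged BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1
 * satisfies a query for BTRFS_BLOCK_GROUP_DATA alone, but a plain DATA
 * block group does not satisfy a query for DATA | RAID1.
 */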
62 * this adds the block group to the fs_info rb tree for the block group cache
65 int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
66 struct btrfs_block_group_cache *block_group)
69 struct rb_node *parent = NULL;
70 struct btrfs_block_group_cache *cache;
72 spin_lock(&info->block_group_cache_lock);
73 p = &info->block_group_cache_tree.rb_node;
77 cache = rb_entry(parent, struct btrfs_block_group_cache,
79 if (block_group->key.objectid < cache->key.objectid) {
81 } else if (block_group->key.objectid > cache->key.objectid) {
84 spin_unlock(&info->block_group_cache_lock);
89 rb_link_node(&block_group->cache_node, parent, p);
90 rb_insert_color(&block_group->cache_node,
91 &info->block_group_cache_tree);
92 spin_unlock(&info->block_group_cache_lock);
98 * This will return the block group at or after bytenr if contains is 0, else
99 * it will return the block group that contains the bytenr
101 static struct btrfs_block_group_cache *
102 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
105 struct btrfs_block_group_cache *cache, *ret = NULL;
109 spin_lock(&info->block_group_cache_lock);
110 n = info->block_group_cache_tree.rb_node;
113 cache = rb_entry(n, struct btrfs_block_group_cache,
115 end = cache->key.objectid + cache->key.offset - 1;
116 start = cache->key.objectid;
118 if (bytenr < start) {
119 if (!contains && (!ret || start < ret->key.objectid))
122 } else if (bytenr > start) {
123 if (contains && bytenr <= end) {
133 spin_unlock(&info->block_group_cache_lock);
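/*
 * Worked example (editor's sketch, values are hypothetical): with block
 * groups covering [0, 1G) and [1G, 2G), a search for bytenr 1G + 512M with
 * contains == 1 returns the [1G, 2G) group, because the bytenr falls inside
 * it.  The same search with contains == 0 instead returns the first group
 * whose key.objectid is at or after the bytenr, which here is NULL unless a
 * group starting at or beyond 1.5G exists.
 */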
139 * this is only called by cache_block_group.  Since we could have freed extents,
140 * we need to check the pinned_extents for any extents that can't be used yet,
141 * since their free space will not be released until the transaction commits.
143 static int add_new_free_space(struct btrfs_block_group_cache *block_group,
144 struct btrfs_fs_info *info, u64 start, u64 end)
146 u64 extent_start, extent_end, size;
149 mutex_lock(&info->pinned_mutex);
150 while (start < end) {
151 ret = find_first_extent_bit(&info->pinned_extents, start,
152 &extent_start, &extent_end,
157 if (extent_start == start) {
158 start = extent_end + 1;
159 } else if (extent_start > start && extent_start < end) {
160 size = extent_start - start;
161 ret = btrfs_add_free_space_lock(block_group, start,
164 start = extent_end + 1;
172 ret = btrfs_add_free_space_lock(block_group, start, size);
175 mutex_unlock(&info->pinned_mutex);
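/*
 * Worked example (editor's sketch): suppose the caller passes the range
 * [start, end) and a single pinned extent lies in the middle of it.  The
 * loop above records the bytes before the pinned extent as free space,
 * skips over the pinned extent, and continues at extent_end + 1, so the
 * pinned bytes are not handed to the free space cache until the
 * transaction that pinned them commits.
 */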
180 static int cache_block_group(struct btrfs_root *root,
181 struct btrfs_block_group_cache *block_group)
183 struct btrfs_path *path;
185 struct btrfs_key key;
186 struct extent_buffer *leaf;
195 root = root->fs_info->extent_root;
197 if (block_group->cached)
200 path = btrfs_alloc_path();
206 * we get into deadlocks with paths held by callers of this function.
207 * since the alloc_mutex is protecting things right now, just
208 * skip the locking here
210 path->skip_locking = 1;
211 first_free = max_t(u64, block_group->key.objectid,
212 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
213 key.objectid = block_group->key.objectid;
215 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
216 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
219 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
223 leaf = path->nodes[0];
224 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
225 if (key.objectid + key.offset > first_free)
226 first_free = key.objectid + key.offset;
229 leaf = path->nodes[0];
230 slot = path->slots[0];
231 if (slot >= btrfs_header_nritems(leaf)) {
232 ret = btrfs_next_leaf(root, path);
240 btrfs_item_key_to_cpu(leaf, &key, slot);
241 if (key.objectid < block_group->key.objectid)
244 if (key.objectid >= block_group->key.objectid +
245 block_group->key.offset)
248 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
254 add_new_free_space(block_group, root->fs_info, last,
257 last = key.objectid + key.offset;
266 add_new_free_space(block_group, root->fs_info, last,
267 block_group->key.objectid +
268 block_group->key.offset);
270 block_group->cached = 1;
273 btrfs_free_path(path);
278 * return the block group that starts at or after bytenr
280 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
284 struct btrfs_block_group_cache *cache;
286 cache = block_group_cache_tree_search(info, bytenr, 0);
292 * return the block group that contains the given bytenr
294 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
298 struct btrfs_block_group_cache *cache;
300 cache = block_group_cache_tree_search(info, bytenr, 1);
305 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
308 struct list_head *head = &info->space_info;
309 struct list_head *cur;
310 struct btrfs_space_info *found;
311 list_for_each(cur, head) {
312 found = list_entry(cur, struct btrfs_space_info, list);
313 if (found->flags == flags)
319 static u64 div_factor(u64 num, int factor)
328 static struct btrfs_block_group_cache *
329 __btrfs_find_block_group(struct btrfs_root *root,
330 struct btrfs_block_group_cache *hint,
331 u64 search_start, int data, int owner)
333 struct btrfs_block_group_cache *cache;
334 struct btrfs_block_group_cache *found_group = NULL;
335 struct btrfs_fs_info *info = root->fs_info;
343 if (data & BTRFS_BLOCK_GROUP_METADATA)
347 struct btrfs_block_group_cache *shint;
348 shint = btrfs_lookup_first_block_group(info, search_start);
349 if (shint && block_group_bits(shint, data) && !shint->ro) {
350 spin_lock(&shint->lock);
351 used = btrfs_block_group_used(&shint->item);
352 if (used + shint->pinned + shint->reserved <
353 div_factor(shint->key.offset, factor)) {
354 spin_unlock(&shint->lock);
357 spin_unlock(&shint->lock);
360 if (hint && !hint->ro && block_group_bits(hint, data)) {
361 spin_lock(&hint->lock);
362 used = btrfs_block_group_used(&hint->item);
363 if (used + hint->pinned + hint->reserved <
364 div_factor(hint->key.offset, factor)) {
365 spin_unlock(&hint->lock);
368 spin_unlock(&hint->lock);
369 last = hint->key.objectid + hint->key.offset;
372 last = max(hint->key.objectid, search_start);
378 cache = btrfs_lookup_first_block_group(root->fs_info, last);
382 spin_lock(&cache->lock);
383 last = cache->key.objectid + cache->key.offset;
384 used = btrfs_block_group_used(&cache->item);
386 if (!cache->ro && block_group_bits(cache, data)) {
387 free_check = div_factor(cache->key.offset, factor);
388 if (used + cache->pinned + cache->reserved <
391 spin_unlock(&cache->lock);
395 spin_unlock(&cache->lock);
403 if (!full_search && factor < 10) {
413 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
414 struct btrfs_block_group_cache
415 *hint, u64 search_start,
419 struct btrfs_block_group_cache *ret;
420 ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
424 /* simple helper to search for an existing extent at a given offset */
425 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
428 struct btrfs_key key;
429 struct btrfs_path *path;
431 path = btrfs_alloc_path();
433 key.objectid = start;
435 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
436 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
438 btrfs_free_path(path);
443 * Back reference rules. Back refs have three main goals:
445 * 1) differentiate between all holders of references to an extent so that
446 * when a reference is dropped we can make sure it was a valid reference
447 * before freeing the extent.
449 * 2) Provide enough information to quickly find the holders of an extent
450 * if we notice a given block is corrupted or bad.
452 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
453 * maintenance. This is actually the same as #2, but with a slightly
454 * different use case.
456 * File extents can be referenced by:
458 * - multiple snapshots, subvolumes, or different generations in one subvol
459 * - different files inside a single subvolume
460 * - different offsets inside a file (bookend extents in file.c)
462 * The extent ref structure has fields for:
464 * - Objectid of the subvolume root
465 * - Generation number of the tree holding the reference
466 * - objectid of the file holding the reference
467 * - number of references held by the parent node (always 1 for tree blocks)
469 * A btree leaf may hold multiple references to a file extent. In most cases,
470 * these references are from the same file and the corresponding offsets inside
471 * the file are close together.
473 * When a file extent is allocated the fields are filled in:
474 * (root_key.objectid, trans->transid, inode objectid, 1)
476 * When a leaf is cow'd new references are added for every file extent found
477 * in the leaf. It looks similar to the create case, but trans->transid will
478 * be different when the block is cow'd.
480 * (root_key.objectid, trans->transid, inode objectid,
481 * number of references in the leaf)
483 * When a file extent is removed either during snapshot deletion or
484 * file truncation, we find the corresponding back reference and check
485 * the following fields:
487 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
490 * Btree extents can be referenced by:
492 * - Different subvolumes
493 * - Different generations of the same subvolume
495 * When a tree block is created, back references are inserted:
497 * (root->root_key.objectid, trans->transid, level, 1)
499 * When a tree block is cow'd, new back references are added for all the
500 * blocks it points to. If the tree block isn't in a reference-counted root,
501 * the old back references are removed. These new back references are of
502 * the form (trans->transid will have increased since creation):
504 * (root->root_key.objectid, trans->transid, level, 1)
506 * When a backref is being deleted, the following fields are checked:
508 * if backref was for a tree root:
509 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
511 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
513 * Back Reference Key composition:
515 * The key objectid corresponds to the first byte in the extent, the key
516 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
517 * byte of the parent extent. If an extent is a tree root, the key offset is set
518 * to the key objectid.
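/*
 * Worked example (editor's sketch, all numbers hypothetical): a file extent
 * at bytenr 4096, referenced from a leaf at bytenr 16384 that belongs to
 * subvolume 5 and describes inode 257, is covered by a back reference item
 * with
 *
 *	key.objectid = 4096	(first byte of the extent)
 *	key.type     = BTRFS_EXTENT_REF_KEY
 *	key.offset   = 16384	(first byte of the parent leaf)
 *
 * and ref fields (root, generation, objectid, num_refs) of roughly
 * (5, the transid of the transaction that created or last cow'd the leaf,
 * 257, 1).  If the extent were itself a tree root, key.offset would equal
 * key.objectid instead.
 */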
521 static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
522 struct btrfs_root *root,
523 struct btrfs_path *path,
524 u64 bytenr, u64 parent,
525 u64 ref_root, u64 ref_generation,
526 u64 owner_objectid, int del)
528 struct btrfs_key key;
529 struct btrfs_extent_ref *ref;
530 struct extent_buffer *leaf;
534 key.objectid = bytenr;
535 key.type = BTRFS_EXTENT_REF_KEY;
538 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
546 leaf = path->nodes[0];
547 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
548 ref_objectid = btrfs_ref_objectid(leaf, ref);
549 if (btrfs_ref_root(leaf, ref) != ref_root ||
550 btrfs_ref_generation(leaf, ref) != ref_generation ||
551 (ref_objectid != owner_objectid &&
552 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
562 static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
563 struct btrfs_root *root,
564 struct btrfs_path *path,
565 u64 bytenr, u64 parent,
566 u64 ref_root, u64 ref_generation,
569 struct btrfs_key key;
570 struct extent_buffer *leaf;
571 struct btrfs_extent_ref *ref;
575 key.objectid = bytenr;
576 key.type = BTRFS_EXTENT_REF_KEY;
579 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
581 leaf = path->nodes[0];
582 ref = btrfs_item_ptr(leaf, path->slots[0],
583 struct btrfs_extent_ref);
584 btrfs_set_ref_root(leaf, ref, ref_root);
585 btrfs_set_ref_generation(leaf, ref, ref_generation);
586 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
587 btrfs_set_ref_num_refs(leaf, ref, 1);
588 } else if (ret == -EEXIST) {
590 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
591 leaf = path->nodes[0];
592 ref = btrfs_item_ptr(leaf, path->slots[0],
593 struct btrfs_extent_ref);
594 if (btrfs_ref_root(leaf, ref) != ref_root ||
595 btrfs_ref_generation(leaf, ref) != ref_generation) {
601 num_refs = btrfs_ref_num_refs(leaf, ref);
602 BUG_ON(num_refs == 0);
603 btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
605 existing_owner = btrfs_ref_objectid(leaf, ref);
606 if (existing_owner != owner_objectid &&
607 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
608 btrfs_set_ref_objectid(leaf, ref,
609 BTRFS_MULTIPLE_OBJECTIDS);
615 btrfs_mark_buffer_dirty(path->nodes[0]);
617 btrfs_release_path(root, path);
621 static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
622 struct btrfs_root *root,
623 struct btrfs_path *path)
625 struct extent_buffer *leaf;
626 struct btrfs_extent_ref *ref;
630 leaf = path->nodes[0];
631 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
632 num_refs = btrfs_ref_num_refs(leaf, ref);
633 BUG_ON(num_refs == 0);
636 ret = btrfs_del_item(trans, root, path);
638 btrfs_set_ref_num_refs(leaf, ref, num_refs);
639 btrfs_mark_buffer_dirty(leaf);
641 btrfs_release_path(root, path);
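/*
 * Illustrative note (editor's sketch): a back reference item whose num_refs
 * is still greater than one after the drop is simply rewritten in place and
 * marked dirty; only when the count would reach zero is the item itself
 * deleted from the extent tree.
 */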
645 static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
646 struct btrfs_root *root, u64 bytenr,
647 u64 orig_parent, u64 parent,
648 u64 orig_root, u64 ref_root,
649 u64 orig_generation, u64 ref_generation,
653 struct btrfs_root *extent_root = root->fs_info->extent_root;
654 struct btrfs_path *path;
656 if (root == root->fs_info->extent_root) {
657 struct pending_extent_op *extent_op;
660 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
661 num_bytes = btrfs_level_size(root, (int)owner_objectid);
662 mutex_lock(&root->fs_info->extent_ins_mutex);
663 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
664 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
666 ret = get_state_private(&root->fs_info->extent_ins,
669 extent_op = (struct pending_extent_op *)
671 BUG_ON(extent_op->parent != orig_parent);
672 BUG_ON(extent_op->generation != orig_generation);
674 extent_op->parent = parent;
675 extent_op->generation = ref_generation;
677 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
680 extent_op->type = PENDING_BACKREF_UPDATE;
681 extent_op->bytenr = bytenr;
682 extent_op->num_bytes = num_bytes;
683 extent_op->parent = parent;
684 extent_op->orig_parent = orig_parent;
685 extent_op->generation = ref_generation;
686 extent_op->orig_generation = orig_generation;
687 extent_op->level = (int)owner_objectid;
689 set_extent_bits(&root->fs_info->extent_ins,
690 bytenr, bytenr + num_bytes - 1,
691 EXTENT_WRITEBACK, GFP_NOFS);
692 set_state_private(&root->fs_info->extent_ins,
693 bytenr, (unsigned long)extent_op);
695 mutex_unlock(&root->fs_info->extent_ins_mutex);
699 path = btrfs_alloc_path();
702 ret = lookup_extent_backref(trans, extent_root, path,
703 bytenr, orig_parent, orig_root,
704 orig_generation, owner_objectid, 1);
707 ret = remove_extent_backref(trans, extent_root, path);
710 ret = insert_extent_backref(trans, extent_root, path, bytenr,
711 parent, ref_root, ref_generation,
714 finish_current_insert(trans, extent_root, 0);
715 del_pending_extents(trans, extent_root, 0);
717 btrfs_free_path(path);
721 int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
722 struct btrfs_root *root, u64 bytenr,
723 u64 orig_parent, u64 parent,
724 u64 ref_root, u64 ref_generation,
728 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
729 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
731 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
732 parent, ref_root, ref_root,
733 ref_generation, ref_generation,
738 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
739 struct btrfs_root *root, u64 bytenr,
740 u64 orig_parent, u64 parent,
741 u64 orig_root, u64 ref_root,
742 u64 orig_generation, u64 ref_generation,
745 struct btrfs_path *path;
747 struct btrfs_key key;
748 struct extent_buffer *l;
749 struct btrfs_extent_item *item;
752 path = btrfs_alloc_path();
757 key.objectid = bytenr;
758 key.type = BTRFS_EXTENT_ITEM_KEY;
759 key.offset = (u64)-1;
761 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
765 BUG_ON(ret == 0 || path->slots[0] == 0);
770 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
771 if (key.objectid != bytenr) {
772 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
773 printk("wanted %Lu found %Lu\n", bytenr, key.objectid);
776 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
778 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
779 refs = btrfs_extent_refs(l, item);
780 btrfs_set_extent_refs(l, item, refs + 1);
781 btrfs_mark_buffer_dirty(path->nodes[0]);
783 btrfs_release_path(root->fs_info->extent_root, path);
786 ret = insert_extent_backref(trans, root->fs_info->extent_root,
787 path, bytenr, parent,
788 ref_root, ref_generation,
791 finish_current_insert(trans, root->fs_info->extent_root, 0);
792 del_pending_extents(trans, root->fs_info->extent_root, 0);
794 btrfs_free_path(path);
798 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
799 struct btrfs_root *root,
800 u64 bytenr, u64 num_bytes, u64 parent,
801 u64 ref_root, u64 ref_generation,
805 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
806 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
808 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
809 0, ref_root, 0, ref_generation,
814 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
815 struct btrfs_root *root)
817 finish_current_insert(trans, root->fs_info->extent_root, 1);
818 del_pending_extents(trans, root->fs_info->extent_root, 1);
822 int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
823 struct btrfs_root *root, u64 bytenr,
824 u64 num_bytes, u32 *refs)
826 struct btrfs_path *path;
828 struct btrfs_key key;
829 struct extent_buffer *l;
830 struct btrfs_extent_item *item;
832 WARN_ON(num_bytes < root->sectorsize);
833 path = btrfs_alloc_path();
835 key.objectid = bytenr;
836 key.offset = num_bytes;
837 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
838 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
843 btrfs_print_leaf(root, path->nodes[0]);
844 printk("failed to find block number %Lu\n", bytenr);
848 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
849 *refs = btrfs_extent_refs(l, item);
851 btrfs_free_path(path);
855 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
856 struct btrfs_root *root, u64 bytenr)
858 struct btrfs_root *extent_root = root->fs_info->extent_root;
859 struct btrfs_path *path;
860 struct extent_buffer *leaf;
861 struct btrfs_extent_ref *ref_item;
862 struct btrfs_key key;
863 struct btrfs_key found_key;
869 key.objectid = bytenr;
870 key.offset = (u64)-1;
871 key.type = BTRFS_EXTENT_ITEM_KEY;
873 path = btrfs_alloc_path();
874 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
880 if (path->slots[0] == 0)
884 leaf = path->nodes[0];
885 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
887 if (found_key.objectid != bytenr ||
888 found_key.type != BTRFS_EXTENT_ITEM_KEY)
891 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
893 leaf = path->nodes[0];
894 nritems = btrfs_header_nritems(leaf);
895 if (path->slots[0] >= nritems) {
896 ret = btrfs_next_leaf(extent_root, path);
903 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
904 if (found_key.objectid != bytenr)
907 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
912 ref_item = btrfs_item_ptr(leaf, path->slots[0],
913 struct btrfs_extent_ref);
914 ref_root = btrfs_ref_root(leaf, ref_item);
915 if (ref_root != root->root_key.objectid &&
916 ref_root != BTRFS_TREE_LOG_OBJECTID) {
920 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
929 btrfs_free_path(path);
933 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
934 struct extent_buffer *buf, u32 nr_extents)
936 struct btrfs_key key;
937 struct btrfs_file_extent_item *fi;
948 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
950 root_gen = root->root_key.offset;
953 root_gen = trans->transid - 1;
956 level = btrfs_header_level(buf);
957 nritems = btrfs_header_nritems(buf);
960 struct btrfs_leaf_ref *ref;
961 struct btrfs_extent_info *info;
963 ref = btrfs_alloc_leaf_ref(root, nr_extents);
969 ref->root_gen = root_gen;
970 ref->bytenr = buf->start;
971 ref->owner = btrfs_header_owner(buf);
972 ref->generation = btrfs_header_generation(buf);
973 ref->nritems = nr_extents;
976 for (i = 0; nr_extents > 0 && i < nritems; i++) {
978 btrfs_item_key_to_cpu(buf, &key, i);
979 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
981 fi = btrfs_item_ptr(buf, i,
982 struct btrfs_file_extent_item);
983 if (btrfs_file_extent_type(buf, fi) ==
984 BTRFS_FILE_EXTENT_INLINE)
986 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
987 if (disk_bytenr == 0)
990 info->bytenr = disk_bytenr;
992 btrfs_file_extent_disk_num_bytes(buf, fi);
993 info->objectid = key.objectid;
994 info->offset = key.offset;
998 ret = btrfs_add_leaf_ref(root, ref, shared);
999 if (ret == -EEXIST && shared) {
1000 struct btrfs_leaf_ref *old;
1001 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
1003 btrfs_remove_leaf_ref(root, old);
1004 btrfs_free_leaf_ref(root, old);
1005 ret = btrfs_add_leaf_ref(root, ref, shared);
1008 btrfs_free_leaf_ref(root, ref);
1014 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1015 struct extent_buffer *orig_buf, struct extent_buffer *buf,
1022 u64 orig_generation;
1024 u32 nr_file_extents = 0;
1025 struct btrfs_key key;
1026 struct btrfs_file_extent_item *fi;
1031 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1032 u64, u64, u64, u64, u64, u64, u64, u64);
1034 ref_root = btrfs_header_owner(buf);
1035 ref_generation = btrfs_header_generation(buf);
1036 orig_root = btrfs_header_owner(orig_buf);
1037 orig_generation = btrfs_header_generation(orig_buf);
1039 nritems = btrfs_header_nritems(buf);
1040 level = btrfs_header_level(buf);
1042 if (root->ref_cows) {
1043 process_func = __btrfs_inc_extent_ref;
1046 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1049 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1051 process_func = __btrfs_update_extent_ref;
1054 for (i = 0; i < nritems; i++) {
1057 btrfs_item_key_to_cpu(buf, &key, i);
1058 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1060 fi = btrfs_item_ptr(buf, i,
1061 struct btrfs_file_extent_item);
1062 if (btrfs_file_extent_type(buf, fi) ==
1063 BTRFS_FILE_EXTENT_INLINE)
1065 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1071 ret = process_func(trans, root, bytenr,
1072 orig_buf->start, buf->start,
1073 orig_root, ref_root,
1074 orig_generation, ref_generation,
1083 bytenr = btrfs_node_blockptr(buf, i);
1084 ret = process_func(trans, root, bytenr,
1085 orig_buf->start, buf->start,
1086 orig_root, ref_root,
1087 orig_generation, ref_generation,
1099 *nr_extents = nr_file_extents;
1101 *nr_extents = nritems;
1109 int btrfs_update_ref(struct btrfs_trans_handle *trans,
1110 struct btrfs_root *root, struct extent_buffer *orig_buf,
1111 struct extent_buffer *buf, int start_slot, int nr)
1118 u64 orig_generation;
1119 struct btrfs_key key;
1120 struct btrfs_file_extent_item *fi;
1126 BUG_ON(start_slot < 0);
1127 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1129 ref_root = btrfs_header_owner(buf);
1130 ref_generation = btrfs_header_generation(buf);
1131 orig_root = btrfs_header_owner(orig_buf);
1132 orig_generation = btrfs_header_generation(orig_buf);
1133 level = btrfs_header_level(buf);
1135 if (!root->ref_cows) {
1137 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1140 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1144 for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1147 btrfs_item_key_to_cpu(buf, &key, slot);
1148 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1150 fi = btrfs_item_ptr(buf, slot,
1151 struct btrfs_file_extent_item);
1152 if (btrfs_file_extent_type(buf, fi) ==
1153 BTRFS_FILE_EXTENT_INLINE)
1155 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1158 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1159 orig_buf->start, buf->start,
1160 orig_root, ref_root,
1161 orig_generation, ref_generation,
1166 bytenr = btrfs_node_blockptr(buf, slot);
1167 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1168 orig_buf->start, buf->start,
1169 orig_root, ref_root,
1170 orig_generation, ref_generation,
1182 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1183 struct btrfs_root *root,
1184 struct btrfs_path *path,
1185 struct btrfs_block_group_cache *cache)
1189 struct btrfs_root *extent_root = root->fs_info->extent_root;
1191 struct extent_buffer *leaf;
1193 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1198 leaf = path->nodes[0];
1199 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1200 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1201 btrfs_mark_buffer_dirty(leaf);
1202 btrfs_release_path(extent_root, path);
1204 finish_current_insert(trans, extent_root, 0);
1205 pending_ret = del_pending_extents(trans, extent_root, 0);
1214 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1215 struct btrfs_root *root)
1217 struct btrfs_block_group_cache *cache, *entry;
1221 struct btrfs_path *path;
1224 path = btrfs_alloc_path();
1230 spin_lock(&root->fs_info->block_group_cache_lock);
1231 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1232 n; n = rb_next(n)) {
1233 entry = rb_entry(n, struct btrfs_block_group_cache,
1240 spin_unlock(&root->fs_info->block_group_cache_lock);
1246 last += cache->key.offset;
1248 err = write_one_cache_group(trans, root,
1251 * if we fail to write the cache group, we want
1252 * to keep it marked dirty in hopes that a later
1260 btrfs_free_path(path);
1264 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1265 u64 total_bytes, u64 bytes_used,
1266 struct btrfs_space_info **space_info)
1268 struct btrfs_space_info *found;
1270 found = __find_space_info(info, flags);
1272 spin_lock(&found->lock);
1273 found->total_bytes += total_bytes;
1274 found->bytes_used += bytes_used;
1276 spin_unlock(&found->lock);
1277 *space_info = found;
1280 found = kmalloc(sizeof(*found), GFP_NOFS);
1284 list_add(&found->list, &info->space_info);
1285 INIT_LIST_HEAD(&found->block_groups);
1286 init_rwsem(&found->groups_sem);
1287 spin_lock_init(&found->lock);
1288 found->flags = flags;
1289 found->total_bytes = total_bytes;
1290 found->bytes_used = bytes_used;
1291 found->bytes_pinned = 0;
1292 found->bytes_reserved = 0;
1294 found->force_alloc = 0;
1295 *space_info = found;
1299 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1301 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1302 BTRFS_BLOCK_GROUP_RAID1 |
1303 BTRFS_BLOCK_GROUP_RAID10 |
1304 BTRFS_BLOCK_GROUP_DUP);
1306 if (flags & BTRFS_BLOCK_GROUP_DATA)
1307 fs_info->avail_data_alloc_bits |= extra_flags;
1308 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1309 fs_info->avail_metadata_alloc_bits |= extra_flags;
1310 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1311 fs_info->avail_system_alloc_bits |= extra_flags;
1315 static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1317 u64 num_devices = root->fs_info->fs_devices->num_devices;
1319 if (num_devices == 1)
1320 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1321 if (num_devices < 4)
1322 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1324 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1325 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1326 BTRFS_BLOCK_GROUP_RAID10))) {
1327 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1330 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1331 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1332 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1335 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1336 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1337 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1338 (flags & BTRFS_BLOCK_GROUP_DUP)))
1339 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
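/*
 * Worked example (editor's sketch): on a single-device filesystem the RAID1
 * and RAID0 bits are cleared above, so a request for
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 reduces to plain
 * BTRFS_BLOCK_GROUP_DATA.  Similarly DUP is dropped when RAID1 or RAID10 is
 * also set, RAID1 is dropped in favour of RAID10, and RAID0 is dropped when
 * any of RAID1, RAID10 or DUP is set.
 */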
1343 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1344 struct btrfs_root *extent_root, u64 alloc_bytes,
1345 u64 flags, int force)
1347 struct btrfs_space_info *space_info;
1351 int ret = 0, waited = 0;
1353 flags = reduce_alloc_profile(extent_root, flags);
1355 space_info = __find_space_info(extent_root->fs_info, flags);
1357 ret = update_space_info(extent_root->fs_info, flags,
1361 BUG_ON(!space_info);
1363 spin_lock(&space_info->lock);
1364 if (space_info->force_alloc) {
1366 space_info->force_alloc = 0;
1368 if (space_info->full) {
1369 spin_unlock(&space_info->lock);
1373 thresh = div_factor(space_info->total_bytes, 6);
1375 (space_info->bytes_used + space_info->bytes_pinned +
1376 space_info->bytes_reserved + alloc_bytes) < thresh) {
1377 spin_unlock(&space_info->lock);
1381 spin_unlock(&space_info->lock);
1383 ret = mutex_trylock(&extent_root->fs_info->chunk_mutex);
1384 if (!ret && !force) {
1387 mutex_lock(&extent_root->fs_info->chunk_mutex);
1392 spin_lock(&space_info->lock);
1393 if (space_info->full) {
1394 spin_unlock(&space_info->lock);
1397 spin_unlock(&space_info->lock);
1400 ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
1402 printk("space info full %Lu\n", flags);
1403 space_info->full = 1;
1407 ret = btrfs_make_block_group(trans, extent_root, 0, flags,
1408 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
1411 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1416 static int update_block_group(struct btrfs_trans_handle *trans,
1417 struct btrfs_root *root,
1418 u64 bytenr, u64 num_bytes, int alloc,
1421 struct btrfs_block_group_cache *cache;
1422 struct btrfs_fs_info *info = root->fs_info;
1423 u64 total = num_bytes;
1428 cache = btrfs_lookup_block_group(info, bytenr);
1432 byte_in_group = bytenr - cache->key.objectid;
1433 WARN_ON(byte_in_group > cache->key.offset);
1435 spin_lock(&cache->space_info->lock);
1436 spin_lock(&cache->lock);
1438 old_val = btrfs_block_group_used(&cache->item);
1439 num_bytes = min(total, cache->key.offset - byte_in_group);
1441 old_val += num_bytes;
1442 cache->space_info->bytes_used += num_bytes;
1443 btrfs_set_block_group_used(&cache->item, old_val);
1444 spin_unlock(&cache->lock);
1445 spin_unlock(&cache->space_info->lock);
1447 old_val -= num_bytes;
1448 cache->space_info->bytes_used -= num_bytes;
1449 btrfs_set_block_group_used(&cache->item, old_val);
1450 spin_unlock(&cache->lock);
1451 spin_unlock(&cache->space_info->lock);
1454 ret = btrfs_add_free_space(cache, bytenr,
1461 bytenr += num_bytes;
1466 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1468 struct btrfs_block_group_cache *cache;
1470 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
1474 return cache->key.objectid;
1477 int btrfs_update_pinned_extents(struct btrfs_root *root,
1478 u64 bytenr, u64 num, int pin)
1481 struct btrfs_block_group_cache *cache;
1482 struct btrfs_fs_info *fs_info = root->fs_info;
1484 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
1486 set_extent_dirty(&fs_info->pinned_extents,
1487 bytenr, bytenr + num - 1, GFP_NOFS);
1489 clear_extent_dirty(&fs_info->pinned_extents,
1490 bytenr, bytenr + num - 1, GFP_NOFS);
1493 cache = btrfs_lookup_block_group(fs_info, bytenr);
1495 len = min(num, cache->key.offset -
1496 (bytenr - cache->key.objectid));
1498 spin_lock(&cache->space_info->lock);
1499 spin_lock(&cache->lock);
1500 cache->pinned += len;
1501 cache->space_info->bytes_pinned += len;
1502 spin_unlock(&cache->lock);
1503 spin_unlock(&cache->space_info->lock);
1504 fs_info->total_pinned += len;
1506 spin_lock(&cache->space_info->lock);
1507 spin_lock(&cache->lock);
1508 cache->pinned -= len;
1509 cache->space_info->bytes_pinned -= len;
1510 spin_unlock(&cache->lock);
1511 spin_unlock(&cache->space_info->lock);
1512 fs_info->total_pinned -= len;
1520 static int update_reserved_extents(struct btrfs_root *root,
1521 u64 bytenr, u64 num, int reserve)
1524 struct btrfs_block_group_cache *cache;
1525 struct btrfs_fs_info *fs_info = root->fs_info;
1528 cache = btrfs_lookup_block_group(fs_info, bytenr);
1530 len = min(num, cache->key.offset -
1531 (bytenr - cache->key.objectid));
1533 spin_lock(&cache->space_info->lock);
1534 spin_lock(&cache->lock);
1536 cache->reserved += len;
1537 cache->space_info->bytes_reserved += len;
1539 cache->reserved -= len;
1540 cache->space_info->bytes_reserved -= len;
1542 spin_unlock(&cache->lock);
1543 spin_unlock(&cache->space_info->lock);
1550 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1555 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1558 mutex_lock(&root->fs_info->pinned_mutex);
1560 ret = find_first_extent_bit(pinned_extents, last,
1561 &start, &end, EXTENT_DIRTY);
1564 set_extent_dirty(copy, start, end, GFP_NOFS);
1567 mutex_unlock(&root->fs_info->pinned_mutex);
1571 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1572 struct btrfs_root *root,
1573 struct extent_io_tree *unpin)
1578 struct btrfs_block_group_cache *cache;
1580 mutex_lock(&root->fs_info->pinned_mutex);
1582 ret = find_first_extent_bit(unpin, 0, &start, &end,
1586 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
1587 clear_extent_dirty(unpin, start, end, GFP_NOFS);
1588 cache = btrfs_lookup_block_group(root->fs_info, start);
1590 btrfs_add_free_space(cache, start, end - start + 1);
1591 if (need_resched()) {
1592 mutex_unlock(&root->fs_info->pinned_mutex);
1594 mutex_lock(&root->fs_info->pinned_mutex);
1597 mutex_unlock(&root->fs_info->pinned_mutex);
1601 static int finish_current_insert(struct btrfs_trans_handle *trans,
1602 struct btrfs_root *extent_root, int all)
1608 struct btrfs_fs_info *info = extent_root->fs_info;
1609 struct btrfs_path *path;
1610 struct btrfs_extent_ref *ref;
1611 struct pending_extent_op *extent_op;
1612 struct btrfs_key key;
1613 struct btrfs_extent_item extent_item;
1617 btrfs_set_stack_extent_refs(&extent_item, 1);
1618 path = btrfs_alloc_path();
1621 mutex_lock(&info->extent_ins_mutex);
1622 ret = find_first_extent_bit(&info->extent_ins, search, &start,
1623 &end, EXTENT_WRITEBACK);
1625 mutex_unlock(&info->extent_ins_mutex);
1626 if (search && all) {
1633 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
1636 mutex_unlock(&info->extent_ins_mutex);
1642 ret = get_state_private(&info->extent_ins, start, &priv);
1644 extent_op = (struct pending_extent_op *)(unsigned long)priv;
1646 mutex_unlock(&info->extent_ins_mutex);
1648 if (extent_op->type == PENDING_EXTENT_INSERT) {
1649 key.objectid = start;
1650 key.offset = end + 1 - start;
1651 key.type = BTRFS_EXTENT_ITEM_KEY;
1652 err = btrfs_insert_item(trans, extent_root, &key,
1653 &extent_item, sizeof(extent_item));
1656 mutex_lock(&info->extent_ins_mutex);
1657 clear_extent_bits(&info->extent_ins, start, end,
1658 EXTENT_WRITEBACK, GFP_NOFS);
1659 mutex_unlock(&info->extent_ins_mutex);
1661 err = insert_extent_backref(trans, extent_root, path,
1662 start, extent_op->parent,
1663 extent_root->root_key.objectid,
1664 extent_op->generation,
1667 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
1668 err = lookup_extent_backref(trans, extent_root, path,
1669 start, extent_op->orig_parent,
1670 extent_root->root_key.objectid,
1671 extent_op->orig_generation,
1672 extent_op->level, 0);
1675 mutex_lock(&info->extent_ins_mutex);
1676 clear_extent_bits(&info->extent_ins, start, end,
1677 EXTENT_WRITEBACK, GFP_NOFS);
1678 mutex_unlock(&info->extent_ins_mutex);
1680 key.objectid = start;
1681 key.offset = extent_op->parent;
1682 key.type = BTRFS_EXTENT_REF_KEY;
1683 err = btrfs_set_item_key_safe(trans, extent_root, path,
1686 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1687 struct btrfs_extent_ref);
1688 btrfs_set_ref_generation(path->nodes[0], ref,
1689 extent_op->generation);
1690 btrfs_mark_buffer_dirty(path->nodes[0]);
1691 btrfs_release_path(extent_root, path);
1696 unlock_extent(&info->extent_ins, start, end, GFP_NOFS);
1704 btrfs_free_path(path);
1708 static int pin_down_bytes(struct btrfs_trans_handle *trans,
1709 struct btrfs_root *root,
1710 u64 bytenr, u64 num_bytes, int is_data)
1713 struct extent_buffer *buf;
1718 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
1722 /* we can reuse a block if it hasn't been written
1723 * and it is from this transaction. We can't
1724 * reuse anything from the tree log root because
1725 * it has tiny sub-transactions.
1727 if (btrfs_buffer_uptodate(buf, 0) &&
1728 btrfs_try_tree_lock(buf)) {
1729 u64 header_owner = btrfs_header_owner(buf);
1730 u64 header_transid = btrfs_header_generation(buf);
1731 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
1732 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
1733 header_transid == trans->transid &&
1734 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
1735 clean_tree_block(NULL, root, buf);
1736 btrfs_tree_unlock(buf);
1737 free_extent_buffer(buf);
1740 btrfs_tree_unlock(buf);
1742 free_extent_buffer(buf);
1744 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
1751 * remove an extent from the root, returns 0 on success
1753 static int __free_extent(struct btrfs_trans_handle *trans,
1754 struct btrfs_root *root,
1755 u64 bytenr, u64 num_bytes, u64 parent,
1756 u64 root_objectid, u64 ref_generation,
1757 u64 owner_objectid, int pin, int mark_free)
1759 struct btrfs_path *path;
1760 struct btrfs_key key;
1761 struct btrfs_fs_info *info = root->fs_info;
1762 struct btrfs_root *extent_root = info->extent_root;
1763 struct extent_buffer *leaf;
1765 int extent_slot = 0;
1766 int found_extent = 0;
1768 struct btrfs_extent_item *ei;
1771 key.objectid = bytenr;
1772 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1773 key.offset = num_bytes;
1774 path = btrfs_alloc_path();
1779 ret = lookup_extent_backref(trans, extent_root, path,
1780 bytenr, parent, root_objectid,
1781 ref_generation, owner_objectid, 1);
1783 struct btrfs_key found_key;
1784 extent_slot = path->slots[0];
1785 while(extent_slot > 0) {
1787 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1789 if (found_key.objectid != bytenr)
1791 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1792 found_key.offset == num_bytes) {
1796 if (path->slots[0] - extent_slot > 5)
1799 if (!found_extent) {
1800 ret = remove_extent_backref(trans, extent_root, path);
1802 btrfs_release_path(extent_root, path);
1803 ret = btrfs_search_slot(trans, extent_root,
1806 extent_slot = path->slots[0];
1809 btrfs_print_leaf(extent_root, path->nodes[0]);
1811 printk("Unable to find ref byte nr %Lu root %Lu "
1812 "gen %Lu owner %Lu\n", bytenr,
1813 root_objectid, ref_generation, owner_objectid);
1816 leaf = path->nodes[0];
1817 ei = btrfs_item_ptr(leaf, extent_slot,
1818 struct btrfs_extent_item);
1819 refs = btrfs_extent_refs(leaf, ei);
1822 btrfs_set_extent_refs(leaf, ei, refs);
1824 btrfs_mark_buffer_dirty(leaf);
1826 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
1827 struct btrfs_extent_ref *ref;
1828 ref = btrfs_item_ptr(leaf, path->slots[0],
1829 struct btrfs_extent_ref);
1830 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
1831 /* if the back ref and the extent are next to each other
1832 * they get deleted below in one shot
1834 path->slots[0] = extent_slot;
1836 } else if (found_extent) {
1837 /* otherwise delete the extent back ref */
1838 ret = remove_extent_backref(trans, extent_root, path);
1840 /* if refs are 0, we need to setup the path for deletion */
1842 btrfs_release_path(extent_root, path);
1843 ret = btrfs_search_slot(trans, extent_root, &key, path,
1852 #ifdef BIO_RW_DISCARD
1853 u64 map_length = num_bytes;
1854 struct btrfs_multi_bio *multi = NULL;
1858 mutex_lock(&root->fs_info->pinned_mutex);
1859 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
1860 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
1861 mutex_unlock(&root->fs_info->pinned_mutex);
1867 /* block accounting for super block */
1868 spin_lock_irq(&info->delalloc_lock);
1869 super_used = btrfs_super_bytes_used(&info->super_copy);
1870 btrfs_set_super_bytes_used(&info->super_copy,
1871 super_used - num_bytes);
1872 spin_unlock_irq(&info->delalloc_lock);
1874 /* block accounting for root item */
1875 root_used = btrfs_root_used(&root->root_item);
1876 btrfs_set_root_used(&root->root_item,
1877 root_used - num_bytes);
1878 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1881 btrfs_release_path(extent_root, path);
1882 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1886 #ifdef BIO_RW_DISCARD
1887 /* Tell the block device(s) that the sectors can be discarded */
1888 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1889 bytenr, &map_length, &multi, 0);
1891 struct btrfs_bio_stripe *stripe = multi->stripes;
1894 if (map_length > num_bytes)
1895 map_length = num_bytes;
1897 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1898 blkdev_issue_discard(stripe->dev->bdev,
1899 stripe->physical >> 9,
1906 btrfs_free_path(path);
1907 finish_current_insert(trans, extent_root, 0);
1912 * find all the blocks marked as pending in the radix tree and remove
1913 * them from the extent map
1915 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1916 btrfs_root *extent_root, int all)
1924 struct extent_io_tree *pending_del;
1925 struct extent_io_tree *extent_ins;
1926 struct pending_extent_op *extent_op;
1927 struct btrfs_fs_info *info = extent_root->fs_info;
1929 extent_ins = &extent_root->fs_info->extent_ins;
1930 pending_del = &extent_root->fs_info->pending_del;
1933 mutex_lock(&info->extent_ins_mutex);
1934 ret = find_first_extent_bit(pending_del, search, &start, &end,
1937 mutex_unlock(&info->extent_ins_mutex);
1938 if (all && search) {
1945 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
1948 mutex_unlock(&info->extent_ins_mutex);
1954 ret = get_state_private(pending_del, start, &priv);
1956 extent_op = (struct pending_extent_op *)(unsigned long)priv;
1958 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
1960 if (!test_range_bit(extent_ins, start, end,
1961 EXTENT_WRITEBACK, 0)) {
1962 mutex_unlock(&info->extent_ins_mutex);
1964 ret = __free_extent(trans, extent_root,
1965 start, end + 1 - start,
1966 extent_op->orig_parent,
1967 extent_root->root_key.objectid,
1968 extent_op->orig_generation,
1969 extent_op->level, 1, 0);
1974 ret = get_state_private(&info->extent_ins, start,
1977 extent_op = (struct pending_extent_op *)
1978 (unsigned long)priv;
1980 clear_extent_bits(&info->extent_ins, start, end,
1981 EXTENT_WRITEBACK, GFP_NOFS);
1983 mutex_unlock(&info->extent_ins_mutex);
1985 if (extent_op->type == PENDING_BACKREF_UPDATE)
1988 mutex_lock(&extent_root->fs_info->pinned_mutex);
1989 ret = pin_down_bytes(trans, extent_root, start,
1990 end + 1 - start, 0);
1991 mutex_unlock(&extent_root->fs_info->pinned_mutex);
1993 ret = update_block_group(trans, extent_root, start,
1994 end + 1 - start, 0, ret > 0);
2001 unlock_extent(extent_ins, start, end, GFP_NOFS);
2013 * remove an extent from the root, returns 0 on success
2015 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2016 struct btrfs_root *root,
2017 u64 bytenr, u64 num_bytes, u64 parent,
2018 u64 root_objectid, u64 ref_generation,
2019 u64 owner_objectid, int pin)
2021 struct btrfs_root *extent_root = root->fs_info->extent_root;
2025 WARN_ON(num_bytes < root->sectorsize);
2026 if (root == extent_root) {
2027 struct pending_extent_op *extent_op;
2029 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2032 extent_op->type = PENDING_EXTENT_DELETE;
2033 extent_op->bytenr = bytenr;
2034 extent_op->num_bytes = num_bytes;
2035 extent_op->parent = parent;
2036 extent_op->orig_parent = parent;
2037 extent_op->generation = ref_generation;
2038 extent_op->orig_generation = ref_generation;
2039 extent_op->level = (int)owner_objectid;
2041 mutex_lock(&root->fs_info->extent_ins_mutex);
2042 set_extent_bits(&root->fs_info->pending_del,
2043 bytenr, bytenr + num_bytes - 1,
2044 EXTENT_WRITEBACK, GFP_NOFS);
2045 set_state_private(&root->fs_info->pending_del,
2046 bytenr, (unsigned long)extent_op);
2047 mutex_unlock(&root->fs_info->extent_ins_mutex);
2050 /* if metadata, always pin */
2051 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2052 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2053 struct btrfs_block_group_cache *cache;
2055 /* btrfs_free_reserved_extent */
2056 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2058 btrfs_add_free_space(cache, bytenr, num_bytes);
2059 update_reserved_extents(root, bytenr, num_bytes, 0);
2065 /* if data, pin when any transaction has committed this */
2066 if (ref_generation != trans->transid)
2069 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2070 root_objectid, ref_generation,
2071 owner_objectid, pin, pin == 0);
2073 finish_current_insert(trans, root->fs_info->extent_root, 0);
2074 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2075 return ret ? ret : pending_ret;
2078 int btrfs_free_extent(struct btrfs_trans_handle *trans,
2079 struct btrfs_root *root,
2080 u64 bytenr, u64 num_bytes, u64 parent,
2081 u64 root_objectid, u64 ref_generation,
2082 u64 owner_objectid, int pin)
2086 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2087 root_objectid, ref_generation,
2088 owner_objectid, pin);
2092 static u64 stripe_align(struct btrfs_root *root, u64 val)
2094 u64 mask = ((u64)root->stripesize - 1);
2095 u64 ret = (val + mask) & ~mask;
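/*
 * Worked example (editor's sketch, assuming a 64K stripesize): mask is
 * 0xffff, so stripe_align(root, 0x12345) evaluates to
 * (0x12345 + 0xffff) & ~0xffff = 0x20000, i.e. the value rounded up to the
 * next stripe boundary.  Already-aligned values are returned unchanged.
 */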
2100 * walks the btree of allocated extents and find a hole of a given size.
2101 * The key ins is changed to record the hole:
2102 * ins->objectid == block start
2103 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2104 * ins->offset == number of blocks
2105 * Any available blocks before search_start are skipped.
2107 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2108 struct btrfs_root *orig_root,
2109 u64 num_bytes, u64 empty_size,
2110 u64 search_start, u64 search_end,
2111 u64 hint_byte, struct btrfs_key *ins,
2112 u64 exclude_start, u64 exclude_nr,
2116 struct btrfs_root * root = orig_root->fs_info->extent_root;
2117 u64 total_needed = num_bytes;
2118 u64 *last_ptr = NULL;
2119 u64 last_wanted = 0;
2120 struct btrfs_block_group_cache *block_group = NULL;
2121 int chunk_alloc_done = 0;
2122 int empty_cluster = 2 * 1024 * 1024;
2123 int allowed_chunk_alloc = 0;
2124 struct list_head *head = NULL, *cur = NULL;
2127 struct btrfs_space_info *space_info;
2129 WARN_ON(num_bytes < root->sectorsize);
2130 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2134 if (orig_root->ref_cows || empty_size)
2135 allowed_chunk_alloc = 1;
2137 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2138 last_ptr = &root->fs_info->last_alloc;
2139 empty_cluster = 64 * 1024;
2142 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2143 last_ptr = &root->fs_info->last_data_alloc;
2147 hint_byte = *last_ptr;
2148 last_wanted = *last_ptr;
2150 empty_size += empty_cluster;
2154 search_start = max(search_start, first_logical_byte(root, 0));
2155 search_start = max(search_start, hint_byte);
2157 if (last_wanted && search_start != last_wanted) {
2159 empty_size += empty_cluster;
2162 total_needed += empty_size;
2163 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2165 block_group = btrfs_lookup_first_block_group(root->fs_info,
2167 space_info = __find_space_info(root->fs_info, data);
2169 down_read(&space_info->groups_sem);
2171 struct btrfs_free_space *free_space;
2173 * the only way this happens is if our hint points to a block
2174 * group that's not of the proper type; while looping this
2175 * should never happen
2181 goto new_group_no_lock;
2183 mutex_lock(&block_group->alloc_mutex);
2184 if (unlikely(!block_group_bits(block_group, data)))
2187 ret = cache_block_group(root, block_group);
2189 mutex_unlock(&block_group->alloc_mutex);
2193 if (block_group->ro)
2196 free_space = btrfs_find_free_space(block_group, search_start,
2199 u64 start = block_group->key.objectid;
2200 u64 end = block_group->key.objectid +
2201 block_group->key.offset;
2203 search_start = stripe_align(root, free_space->offset);
2205 /* move on to the next group */
2206 if (search_start + num_bytes >= search_end)
2209 /* move on to the next group */
2210 if (search_start + num_bytes > end)
2213 if (last_wanted && search_start != last_wanted) {
2214 total_needed += empty_cluster;
2215 empty_size += empty_cluster;
2218 * if search_start is still in this block group
2219 * then we just re-search this block group
2221 if (search_start >= start &&
2222 search_start < end) {
2223 mutex_unlock(&block_group->alloc_mutex);
2227 /* else we go to the next block group */
2231 if (exclude_nr > 0 &&
2232 (search_start + num_bytes > exclude_start &&
2233 search_start < exclude_start + exclude_nr)) {
2234 search_start = exclude_start + exclude_nr;
2236 * if search_start is still in this block group
2237 * then we just re-search this block group
2239 if (search_start >= start &&
2240 search_start < end) {
2241 mutex_unlock(&block_group->alloc_mutex);
2246 /* else we go to the next block group */
2250 ins->objectid = search_start;
2251 ins->offset = num_bytes;
2253 btrfs_remove_free_space_lock(block_group, search_start,
2255 /* we are all good, let's return */
2256 mutex_unlock(&block_group->alloc_mutex);
2260 mutex_unlock(&block_group->alloc_mutex);
2262 /* don't try to compare new allocations against the
2263 * last allocation any more
2268 * Here's how this works.
2269 * loop == 0: we were searching a block group via a hint
2270 * and didn't find anything, so we start at
2271 * the head of the block groups and keep searching
2272 * loop == 1: we're searching through all of the block groups
2273 * if we hit the head again we have searched
2274 * all of the block groups for this space and we
2275 * need to try and allocate; if we can't, error out.
2276 * loop == 2: we allocated more space and are looping through
2277 * all of the block groups again.
2280 head = &space_info->block_groups;
2283 } else if (loop == 1 && cur == head) {
2286 /* at this point we give up on the empty_size
2287 * allocations and just try to allocate the min
2290 * The extra_loop field was set if an empty_size
2291 * allocation was attempted above, and if this
2292 * is set we need to try the loop again without
2293 * the additional empty_size.
2295 total_needed -= empty_size;
2297 keep_going = extra_loop;
2300 if (allowed_chunk_alloc && !chunk_alloc_done) {
2301 up_read(&space_info->groups_sem);
2302 ret = do_chunk_alloc(trans, root, num_bytes +
2303 2 * 1024 * 1024, data, 1);
2304 down_read(&space_info->groups_sem);
2307 head = &space_info->block_groups;
2309 * we've allocated a new chunk, keep
2313 chunk_alloc_done = 1;
2314 } else if (!allowed_chunk_alloc) {
2315 space_info->force_alloc = 1;
2324 } else if (cur == head) {
2328 block_group = list_entry(cur, struct btrfs_block_group_cache,
2330 search_start = block_group->key.objectid;
2334 /* we found what we needed */
2335 if (ins->objectid) {
2336 if (!(data & BTRFS_BLOCK_GROUP_DATA))
2337 trans->block_group = block_group;
2340 *last_ptr = ins->objectid + ins->offset;
2346 up_read(&space_info->groups_sem);
2350 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
2352 struct btrfs_block_group_cache *cache;
2353 struct list_head *l;
2355 printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
2356 info->total_bytes - info->bytes_used - info->bytes_pinned -
2357 info->bytes_reserved, (info->full) ? "" : "not ");
2359 down_read(&info->groups_sem);
2360 list_for_each(l, &info->block_groups) {
2361 cache = list_entry(l, struct btrfs_block_group_cache, list);
2362 spin_lock(&cache->lock);
2363 printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
2364 "%Lu pinned %Lu reserved\n",
2365 cache->key.objectid, cache->key.offset,
2366 btrfs_block_group_used(&cache->item),
2367 cache->pinned, cache->reserved);
2368 btrfs_dump_free_space(cache, bytes);
2369 spin_unlock(&cache->lock);
2371 up_read(&info->groups_sem);
2374 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2375 struct btrfs_root *root,
2376 u64 num_bytes, u64 min_alloc_size,
2377 u64 empty_size, u64 hint_byte,
2378 u64 search_end, struct btrfs_key *ins,
2382 u64 search_start = 0;
2384 struct btrfs_fs_info *info = root->fs_info;
2387 alloc_profile = info->avail_data_alloc_bits &
2388 info->data_alloc_profile;
2389 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2390 } else if (root == root->fs_info->chunk_root) {
2391 alloc_profile = info->avail_system_alloc_bits &
2392 info->system_alloc_profile;
2393 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2395 alloc_profile = info->avail_metadata_alloc_bits &
2396 info->metadata_alloc_profile;
2397 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2400 data = reduce_alloc_profile(root, data);
2402 * the only place that sets empty_size is btrfs_realloc_node, which
2403 * is not called recursively on allocations
2405 if (empty_size || root->ref_cows) {
2406 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2407 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2409 BTRFS_BLOCK_GROUP_METADATA |
2410 (info->metadata_alloc_profile &
2411 info->avail_metadata_alloc_bits), 0);
2413 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2414 num_bytes + 2 * 1024 * 1024, data, 0);
2417 WARN_ON(num_bytes < root->sectorsize);
2418 ret = find_free_extent(trans, root, num_bytes, empty_size,
2419 search_start, search_end, hint_byte, ins,
2420 trans->alloc_exclude_start,
2421 trans->alloc_exclude_nr, data);
2423 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2424 num_bytes = num_bytes >> 1;
2425 num_bytes = num_bytes & ~(root->sectorsize - 1);
2426 num_bytes = max(num_bytes, min_alloc_size);
2427 do_chunk_alloc(trans, root->fs_info->extent_root,
2428 num_bytes, data, 1);
2432 struct btrfs_space_info *sinfo;
2434 sinfo = __find_space_info(root->fs_info, data);
2435 printk("allocation failed flags %Lu, wanted %Lu\n",
2437 dump_space_info(sinfo, num_bytes);
2444 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2446 struct btrfs_block_group_cache *cache;
2448 cache = btrfs_lookup_block_group(root->fs_info, start);
2450 printk(KERN_ERR "Unable to find block group for %Lu\n", start);
2453 btrfs_add_free_space(cache, start, len);
2454 update_reserved_extents(root, start, len, 0);
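/*
 * Usage note (editor's sketch): btrfs_reserve_extent() below pairs with
 * btrfs_free_reserved_extent() above.  The former finds space and bumps the
 * reserved counters via update_reserved_extents(..., 1); if the caller never
 * turns the reservation into a real allocation, the latter returns the range
 * to the free space cache and drops the counters with
 * update_reserved_extents(..., 0).
 */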
2458 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2459 struct btrfs_root *root,
2460 u64 num_bytes, u64 min_alloc_size,
2461 u64 empty_size, u64 hint_byte,
2462 u64 search_end, struct btrfs_key *ins,
2466 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2467 empty_size, hint_byte, search_end, ins,
2469 update_reserved_extents(root, ins->objectid, ins->offset, 1);
2473 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2474 struct btrfs_root *root, u64 parent,
2475 u64 root_objectid, u64 ref_generation,
2476 u64 owner, struct btrfs_key *ins)
2482 u64 num_bytes = ins->offset;
2484 struct btrfs_fs_info *info = root->fs_info;
2485 struct btrfs_root *extent_root = info->extent_root;
2486 struct btrfs_extent_item *extent_item;
2487 struct btrfs_extent_ref *ref;
2488 struct btrfs_path *path;
2489 struct btrfs_key keys[2];
2492 parent = ins->objectid;
2494 /* block accounting for super block */
2495 spin_lock_irq(&info->delalloc_lock);
2496 super_used = btrfs_super_bytes_used(&info->super_copy);
2497 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2498 spin_unlock_irq(&info->delalloc_lock);
2500 /* block accounting for root item */
2501 root_used = btrfs_root_used(&root->root_item);
2502 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
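/*
 * allocations done on behalf of the extent tree itself can't insert
 * the new extent item directly (that would recurse into the tree we
 * are modifying), so queue a pending insert in the extent_ins tree and
 * let finish_current_insert apply it later.
 */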
2504 if (root == extent_root) {
2505 struct pending_extent_op *extent_op;
2507 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2510 extent_op->type = PENDING_EXTENT_INSERT;
2511 extent_op->bytenr = ins->objectid;
2512 extent_op->num_bytes = ins->offset;
2513 extent_op->parent = parent;
2514 extent_op->orig_parent = 0;
2515 extent_op->generation = ref_generation;
2516 extent_op->orig_generation = 0;
2517 extent_op->level = (int)owner;
2519 mutex_lock(&root->fs_info->extent_ins_mutex);
2520 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2521 ins->objectid + ins->offset - 1,
2522 EXTENT_WRITEBACK, GFP_NOFS);
2523 set_state_private(&root->fs_info->extent_ins,
2524 ins->objectid, (unsigned long)extent_op);
2525 mutex_unlock(&root->fs_info->extent_ins_mutex);
2529 memcpy(&keys[0], ins, sizeof(*ins));
2530 keys[1].objectid = ins->objectid;
2531 keys[1].type = BTRFS_EXTENT_REF_KEY;
2532 keys[1].offset = parent;
2533 sizes[0] = sizeof(*extent_item);
2534 sizes[1] = sizeof(*ref);
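/*
 * the extent item and its first backref are inserted in one batch:
 * keys[0] is the extent item key from the reservation, keys[1] is the
 * backref key at the same objectid, so the two items end up next to
 * each other in the extent tree.
 */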
2536 path = btrfs_alloc_path();
2539 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2543 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2544 struct btrfs_extent_item);
2545 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2546 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2547 struct btrfs_extent_ref);
2549 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2550 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2551 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2552 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
2554 btrfs_mark_buffer_dirty(path->nodes[0]);
2556 trans->alloc_exclude_start = 0;
2557 trans->alloc_exclude_nr = 0;
2558 btrfs_free_path(path);
2559 finish_current_insert(trans, extent_root, 0);
2560 pending_ret = del_pending_extents(trans, extent_root, 0);
2570 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2572 printk("update block group failed for %Lu %Lu\n",
2573 ins->objectid, ins->offset);
2580 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2581 struct btrfs_root *root, u64 parent,
2582 u64 root_objectid, u64 ref_generation,
2583 u64 owner, struct btrfs_key *ins)
2587 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
2589 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
2590 ref_generation, owner, ins);
2591 update_reserved_extents(root, ins->objectid, ins->offset, 0);
2596 * this is used by the tree logging recovery code. It records that
2597 * an extent has been allocated and makes sure to clear the free
2598 * space cache bits as well
2600 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
2601 struct btrfs_root *root, u64 parent,
2602 u64 root_objectid, u64 ref_generation,
2603 u64 owner, struct btrfs_key *ins)
2606 struct btrfs_block_group_cache *block_group;
2608 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
2609 mutex_lock(&block_group->alloc_mutex);
2610 cache_block_group(root, block_group);
2612 ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
2614 mutex_unlock(&block_group->alloc_mutex);
2616 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
2617 ref_generation, owner, ins);
2622 * finds a free extent and does all the dirty work required for allocation.
2623 * returns the key for the extent through ins.
2624 *
2626 * returns 0 if everything worked, non-zero otherwise.
2628 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2629 struct btrfs_root *root,
2630 u64 num_bytes, u64 parent, u64 min_alloc_size,
2631 u64 root_objectid, u64 ref_generation,
2632 u64 owner_objectid, u64 empty_size, u64 hint_byte,
2633 u64 search_end, struct btrfs_key *ins, u64 data)
2637 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2638 min_alloc_size, empty_size, hint_byte,
2639 search_end, ins, data);
2641 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
2642 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
2643 root_objectid, ref_generation,
2644 owner_objectid, ins);
2648 update_reserved_extents(root, ins->objectid, ins->offset, 1);
2653 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2654 struct btrfs_root *root,
2655 u64 bytenr, u32 blocksize)
2657 struct extent_buffer *buf;
2659 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
2661 return ERR_PTR(-ENOMEM);
2662 btrfs_set_header_generation(buf, trans->transid);
2663 btrfs_tree_lock(buf);
2664 clean_tree_block(trans, root, buf);
2665 btrfs_set_buffer_uptodate(buf);
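/*
 * blocks that belong to the tree log are tracked in dirty_log_pages so
 * a log commit can write them without waiting for the rest of the
 * transaction; everything else goes into the transaction's dirty pages.
 */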
2666 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2667 set_extent_dirty(&root->dirty_log_pages, buf->start,
2668 buf->start + buf->len - 1, GFP_NOFS);
2670 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2671 buf->start + buf->len - 1, GFP_NOFS);
2673 trans->blocks_used++;
2678 * helper function to allocate a block for a given tree
2679 * returns the tree buffer or NULL.
2681 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2682 struct btrfs_root *root,
2683 u32 blocksize, u64 parent,
2690 struct btrfs_key ins;
2692 struct extent_buffer *buf;
2694 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
2695 root_objectid, ref_generation, level,
2696 empty_size, hint, (u64)-1, &ins, 0);
2699 return ERR_PTR(ret);
2702 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
2706 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
2707 struct btrfs_root *root, struct extent_buffer *leaf)
2710 u64 leaf_generation;
2711 struct btrfs_key key;
2712 struct btrfs_file_extent_item *fi;
2717 BUG_ON(!btrfs_is_leaf(leaf));
2718 nritems = btrfs_header_nritems(leaf);
2719 leaf_owner = btrfs_header_owner(leaf);
2720 leaf_generation = btrfs_header_generation(leaf);
2722 for (i = 0; i < nritems; i++) {
2726 btrfs_item_key_to_cpu(leaf, &key, i);
2727 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2729 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2730 if (btrfs_file_extent_type(leaf, fi) ==
2731 BTRFS_FILE_EXTENT_INLINE)
2734 * FIXME make sure to insert a trans record that
2735 * repeats the snapshot del on crash
2737 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2738 if (disk_bytenr == 0)
2741 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2742 btrfs_file_extent_disk_num_bytes(leaf, fi),
2743 leaf->start, leaf_owner, leaf_generation,
2747 atomic_inc(&root->fs_info->throttle_gen);
2748 wake_up(&root->fs_info->transaction_throttle);
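/*
 * same as btrfs_drop_leaf_ref, but it works from a cached
 * btrfs_leaf_ref, so the file extents recorded in the ref can be freed
 * without reading the leaf back from disk.
 */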
2754 static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
2755 struct btrfs_root *root,
2756 struct btrfs_leaf_ref *ref)
2760 struct btrfs_extent_info *info = ref->extents;
2762 for (i = 0; i < ref->nritems; i++) {
2763 ret = __btrfs_free_extent(trans, root, info->bytenr,
2764 info->num_bytes, ref->bytenr,
2765 ref->owner, ref->generation,
2768 atomic_inc(&root->fs_info->throttle_gen);
2769 wake_up(&root->fs_info->transaction_throttle);
2779 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2784 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
2787 #if 0 /* some debugging code in case we see problems here */
2788 /* if the refs count is one, it won't get increased again. But
2789 * if the ref count is > 1, someone may be decreasing it at
2790 * the same time we are.
2793 struct extent_buffer *eb = NULL;
2794 eb = btrfs_find_create_tree_block(root, start, len);
2796 btrfs_tree_lock(eb);
2798 mutex_lock(&root->fs_info->alloc_mutex);
2799 ret = lookup_extent_ref(NULL, root, start, len, refs);
2801 mutex_unlock(&root->fs_info->alloc_mutex);
2804 btrfs_tree_unlock(eb);
2805 free_extent_buffer(eb);
2808 printk("block %llu went down to one during drop_snap\n",
2809 (unsigned long long)start);
2820 * helper function for drop_snapshot, this walks down the tree dropping ref
2821 * counts as it goes.
2823 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2824 struct btrfs_root *root,
2825 struct btrfs_path *path, int *level)
2831 struct extent_buffer *next;
2832 struct extent_buffer *cur;
2833 struct extent_buffer *parent;
2834 struct btrfs_leaf_ref *ref;
2839 WARN_ON(*level < 0);
2840 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2841 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2842 path->nodes[*level]->len, &refs);
2848 * walk down to the last node level and free all the leaves
2850 while(*level >= 0) {
2851 WARN_ON(*level < 0);
2852 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2853 cur = path->nodes[*level];
2855 if (btrfs_header_level(cur) != *level)
2858 if (path->slots[*level] >=
2859 btrfs_header_nritems(cur))
2862 ret = btrfs_drop_leaf_ref(trans, root, cur);
2866 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2867 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2868 blocksize = btrfs_level_size(root, *level - 1);
2870 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
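/*
 * if the child block is still referenced somewhere else (refs > 1),
 * don't walk into it; just drop our reference and move to the next
 * slot in this node.
 */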
2873 parent = path->nodes[*level];
2874 root_owner = btrfs_header_owner(parent);
2875 root_gen = btrfs_header_generation(parent);
2876 path->slots[*level]++;
2878 ret = __btrfs_free_extent(trans, root, bytenr,
2879 blocksize, parent->start,
2880 root_owner, root_gen,
2884 atomic_inc(&root->fs_info->throttle_gen);
2885 wake_up(&root->fs_info->transaction_throttle);
2891 * at this point, we have a single ref, and since the
2892 * only place referencing this extent is a dead root,
2893 * the reference count should never go higher.
2894 * So, we don't need to check it again
2897 ref = btrfs_lookup_leaf_ref(root, bytenr);
2898 if (ref && ref->generation != ptr_gen) {
2899 btrfs_free_leaf_ref(root, ref);
2903 ret = cache_drop_leaf_ref(trans, root, ref);
2905 btrfs_remove_leaf_ref(root, ref);
2906 btrfs_free_leaf_ref(root, ref);
2910 if (printk_ratelimit()) {
2911 printk("leaf ref miss for bytenr %llu\n",
2912 (unsigned long long)bytenr);
2915 next = btrfs_find_tree_block(root, bytenr, blocksize);
2916 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2917 free_extent_buffer(next);
2919 next = read_tree_block(root, bytenr, blocksize,
2924 * this is a debugging check and can go away
2925 * the ref should never go all the way down to 1
2928 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2934 WARN_ON(*level <= 0);
2935 if (path->nodes[*level-1])
2936 free_extent_buffer(path->nodes[*level-1]);
2937 path->nodes[*level-1] = next;
2938 *level = btrfs_header_level(next);
2939 path->slots[*level] = 0;
2943 WARN_ON(*level < 0);
2944 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2946 if (path->nodes[*level] == root->node) {
2947 parent = path->nodes[*level];
2948 bytenr = path->nodes[*level]->start;
2950 parent = path->nodes[*level + 1];
2951 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2954 blocksize = btrfs_level_size(root, *level);
2955 root_owner = btrfs_header_owner(parent);
2956 root_gen = btrfs_header_generation(parent);
2958 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2959 parent->start, root_owner, root_gen,
2961 free_extent_buffer(path->nodes[*level]);
2962 path->nodes[*level] = NULL;
2971 * helper function for drop_subtree, this function is similar to
2972 * walk_down_tree. The main difference is that it checks reference
2973 * counts while tree blocks are locked.
2975 static int noinline walk_down_subtree(struct btrfs_trans_handle *trans,
2976 struct btrfs_root *root,
2977 struct btrfs_path *path, int *level)
2979 struct extent_buffer *next;
2980 struct extent_buffer *cur;
2981 struct extent_buffer *parent;
2988 cur = path->nodes[*level];
2989 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
2995 while (*level >= 0) {
2996 cur = path->nodes[*level];
2998 ret = btrfs_drop_leaf_ref(trans, root, cur);
3000 clean_tree_block(trans, root, cur);
3003 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3004 clean_tree_block(trans, root, cur);
3008 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3009 blocksize = btrfs_level_size(root, *level - 1);
3010 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3012 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3013 btrfs_tree_lock(next);
3015 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3019 parent = path->nodes[*level];
3020 ret = btrfs_free_extent(trans, root, bytenr,
3021 blocksize, parent->start,
3022 btrfs_header_owner(parent),
3023 btrfs_header_generation(parent),
3026 path->slots[*level]++;
3027 btrfs_tree_unlock(next);
3028 free_extent_buffer(next);
3032 *level = btrfs_header_level(next);
3033 path->nodes[*level] = next;
3034 path->slots[*level] = 0;
3035 path->locks[*level] = 1;
3039 parent = path->nodes[*level + 1];
3040 bytenr = path->nodes[*level]->start;
3041 blocksize = path->nodes[*level]->len;
3043 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3044 parent->start, btrfs_header_owner(parent),
3045 btrfs_header_generation(parent), *level, 1);
3048 if (path->locks[*level]) {
3049 btrfs_tree_unlock(path->nodes[*level]);
3050 path->locks[*level] = 0;
3052 free_extent_buffer(path->nodes[*level]);
3053 path->nodes[*level] = NULL;
3060 * helper for dropping snapshots. This walks back up the tree in the path
3061 * to find the first node higher up where we haven't yet gone through
3064 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
3065 struct btrfs_root *root,
3066 struct btrfs_path *path,
3067 int *level, int max_level)
3071 struct btrfs_root_item *root_item = &root->root_item;
3076 for (i = *level; i < max_level && path->nodes[i]; i++) {
3077 slot = path->slots[i];
3078 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3079 struct extent_buffer *node;
3080 struct btrfs_disk_key disk_key;
3081 node = path->nodes[i];
3084 WARN_ON(*level == 0);
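/*
 * remember how far we got in the root item so the snapshot deletion
 * can resume from this key after a commit instead of starting over.
 */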
3085 btrfs_node_key(node, &disk_key, path->slots[i]);
3086 memcpy(&root_item->drop_progress,
3087 &disk_key, sizeof(disk_key));
3088 root_item->drop_level = i;
3091 struct extent_buffer *parent;
3092 if (path->nodes[*level] == root->node)
3093 parent = path->nodes[*level];
3095 parent = path->nodes[*level + 1];
3097 root_owner = btrfs_header_owner(parent);
3098 root_gen = btrfs_header_generation(parent);
3100 clean_tree_block(trans, root, path->nodes[*level]);
3101 ret = btrfs_free_extent(trans, root,
3102 path->nodes[*level]->start,
3103 path->nodes[*level]->len,
3104 parent->start, root_owner,
3105 root_gen, *level, 1);
3107 if (path->locks[*level]) {
3108 btrfs_tree_unlock(path->nodes[*level]);
3109 path->locks[*level] = 0;
3111 free_extent_buffer(path->nodes[*level]);
3112 path->nodes[*level] = NULL;
3120 * drop the reference count on the tree rooted at 'snap'. This traverses
3121 * the tree freeing any blocks that have a ref count of zero after being
3124 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3130 struct btrfs_path *path;
3133 struct btrfs_root_item *root_item = &root->root_item;
3135 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3136 path = btrfs_alloc_path();
3139 level = btrfs_header_level(root->node);
3141 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3142 path->nodes[level] = root->node;
3143 extent_buffer_get(root->node);
3144 path->slots[level] = 0;
3146 struct btrfs_key key;
3147 struct btrfs_disk_key found_key;
3148 struct extent_buffer *node;
3150 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3151 level = root_item->drop_level;
3152 path->lowest_level = level;
3153 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3158 node = path->nodes[level];
3159 btrfs_node_key(node, &found_key, path->slots[level]);
3160 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3161 sizeof(found_key)));
3163 * unlock our path, this is safe because only this
3164 * function is allowed to delete this snapshot
3166 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3167 if (path->nodes[i] && path->locks[i]) {
3169 btrfs_tree_unlock(path->nodes[i]);
3174 wret = walk_down_tree(trans, root, path, &level);
3180 wret = walk_up_tree(trans, root, path, &level,
3186 if (trans->transaction->in_commit) {
3190 atomic_inc(&root->fs_info->throttle_gen);
3191 wake_up(&root->fs_info->transaction_throttle);
3193 for (i = 0; i <= orig_level; i++) {
3194 if (path->nodes[i]) {
3195 free_extent_buffer(path->nodes[i]);
3196 path->nodes[i] = NULL;
3200 btrfs_free_path(path);
3204 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3205 struct btrfs_root *root,
3206 struct extent_buffer *node,
3207 struct extent_buffer *parent)
3209 struct btrfs_path *path;
3215 path = btrfs_alloc_path();
3218 BUG_ON(!btrfs_tree_locked(parent));
3219 parent_level = btrfs_header_level(parent);
3220 extent_buffer_get(parent);
3221 path->nodes[parent_level] = parent;
3222 path->slots[parent_level] = btrfs_header_nritems(parent);
3224 BUG_ON(!btrfs_tree_locked(node));
3225 level = btrfs_header_level(node);
3226 extent_buffer_get(node);
3227 path->nodes[level] = node;
3228 path->slots[level] = 0;
3231 wret = walk_down_subtree(trans, root, path, &level);
3237 wret = walk_up_tree(trans, root, path, &level, parent_level);
3244 btrfs_free_path(path);
3248 static unsigned long calc_ra(unsigned long start, unsigned long last,
3251 return min(last, start + nr - 1);
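/*
 * pull the byte range [start, start + len) of the relocation inode
 * into the page cache and mark it delalloc/dirty.  Writeback will then
 * copy the data into newly allocated space; the EXTENT_BOUNDARY bit on
 * the first page keeps neighbouring relocated extents from being
 * merged when they are written out.
 */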
3254 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
3259 unsigned long first_index;
3260 unsigned long last_index;
3263 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3264 struct file_ra_state *ra;
3265 struct btrfs_ordered_extent *ordered;
3266 unsigned int total_read = 0;
3267 unsigned int total_dirty = 0;
3270 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3272 mutex_lock(&inode->i_mutex);
3273 first_index = start >> PAGE_CACHE_SHIFT;
3274 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3276 /* make sure the dirty trick played by the caller works */
3277 ret = invalidate_inode_pages2_range(inode->i_mapping,
3278 first_index, last_index);
3282 file_ra_state_init(ra, inode->i_mapping);
3284 for (i = first_index ; i <= last_index; i++) {
3285 if (total_read % ra->ra_pages == 0) {
3286 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3287 calc_ra(i, last_index, ra->ra_pages));
3291 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
3293 page = grab_cache_page(inode->i_mapping, i);
3298 if (!PageUptodate(page)) {
3299 btrfs_readpage(NULL, page);
3301 if (!PageUptodate(page)) {
3303 page_cache_release(page);
3308 wait_on_page_writeback(page);
3310 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3311 page_end = page_start + PAGE_CACHE_SIZE - 1;
3312 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3314 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3316 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3318 page_cache_release(page);
3319 btrfs_start_ordered_extent(inode, ordered, 1);
3320 btrfs_put_ordered_extent(ordered);
3323 set_page_extent_mapped(page);
3325 btrfs_set_extent_delalloc(inode, page_start, page_end);
3326 if (i == first_index)
3327 set_extent_bits(io_tree, page_start, page_end,
3328 EXTENT_BOUNDARY, GFP_NOFS);
3330 set_page_dirty(page);
3333 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3335 page_cache_release(page);
3340 mutex_unlock(&inode->i_mutex);
3341 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
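/*
 * relocate one data extent: insert a pinned extent mapping that points
 * the relocation inode at the extent's current location so
 * btrfs_readpage pulls in the existing data, then dirty the pages via
 * relocate_inode_pages so writeback copies them to a new location.
 */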
3345 static int noinline relocate_data_extent(struct inode *reloc_inode,
3346 struct btrfs_key *extent_key,
3349 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
3350 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
3351 struct extent_map *em;
3352 u64 start = extent_key->objectid - offset;
3353 u64 end = start + extent_key->offset - 1;
3355 em = alloc_extent_map(GFP_NOFS);
3356 BUG_ON(!em || IS_ERR(em));
3359 em->len = extent_key->offset;
3360 em->block_len = extent_key->offset;
3361 em->block_start = extent_key->objectid;
3362 em->bdev = root->fs_info->fs_devices->latest_bdev;
3363 set_bit(EXTENT_FLAG_PINNED, &em->flags);
3365 /* setup extent map to cheat btrfs_readpage */
3366 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
3369 spin_lock(&em_tree->lock);
3370 ret = add_extent_mapping(em_tree, em);
3371 spin_unlock(&em_tree->lock);
3372 if (ret != -EEXIST) {
3373 free_extent_map(em);
3376 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
3378 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
3380 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
3383 struct btrfs_ref_path {
3385 u64 nodes[BTRFS_MAX_LEVEL];
3387 u64 root_generation;
3394 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
3395 u64 new_nodes[BTRFS_MAX_LEVEL];
3398 struct disk_extent {
3409 static int is_cowonly_root(u64 root_objectid)
3411 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
3412 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
3413 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
3414 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
3415 root_objectid == BTRFS_TREE_LOG_OBJECTID)
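/*
 * walk the backrefs of an extent one step at a time.  The first call
 * (via btrfs_first_ref_path) climbs from the extent toward a tree
 * root, recording the bytenr of each referencing node in
 * ref_path->nodes[]; later calls (via btrfs_next_ref_path) walk back
 * down the recorded path and branch off at the next unvisited backref,
 * so repeated calls enumerate every path from the extent to a root.
 */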
3420 static int noinline __next_ref_path(struct btrfs_trans_handle *trans,
3421 struct btrfs_root *extent_root,
3422 struct btrfs_ref_path *ref_path,
3425 struct extent_buffer *leaf;
3426 struct btrfs_path *path;
3427 struct btrfs_extent_ref *ref;
3428 struct btrfs_key key;
3429 struct btrfs_key found_key;
3435 path = btrfs_alloc_path();
3440 ref_path->lowest_level = -1;
3441 ref_path->current_level = -1;
3442 ref_path->shared_level = -1;
3446 level = ref_path->current_level - 1;
3447 while (level >= -1) {
3449 if (level < ref_path->lowest_level)
3453 bytenr = ref_path->nodes[level];
3455 bytenr = ref_path->extent_start;
3457 BUG_ON(bytenr == 0);
3459 parent = ref_path->nodes[level + 1];
3460 ref_path->nodes[level + 1] = 0;
3461 ref_path->current_level = level;
3462 BUG_ON(parent == 0);
3464 key.objectid = bytenr;
3465 key.offset = parent + 1;
3466 key.type = BTRFS_EXTENT_REF_KEY;
3468 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
3473 leaf = path->nodes[0];
3474 nritems = btrfs_header_nritems(leaf);
3475 if (path->slots[0] >= nritems) {
3476 ret = btrfs_next_leaf(extent_root, path);
3481 leaf = path->nodes[0];
3484 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3485 if (found_key.objectid == bytenr &&
3486 found_key.type == BTRFS_EXTENT_REF_KEY) {
3487 if (level < ref_path->shared_level)
3488 ref_path->shared_level = level;
3493 btrfs_release_path(extent_root, path);
3496 /* reached lowest level */
3500 level = ref_path->current_level;
3501 while (level < BTRFS_MAX_LEVEL - 1) {
3504 bytenr = ref_path->nodes[level];
3506 bytenr = ref_path->extent_start;
3508 BUG_ON(bytenr == 0);
3510 key.objectid = bytenr;
3512 key.type = BTRFS_EXTENT_REF_KEY;
3514 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
3518 leaf = path->nodes[0];
3519 nritems = btrfs_header_nritems(leaf);
3520 if (path->slots[0] >= nritems) {
3521 ret = btrfs_next_leaf(extent_root, path);
3525 /* the extent was freed by someone */
3526 if (ref_path->lowest_level == level)
3528 btrfs_release_path(extent_root, path);
3531 leaf = path->nodes[0];
3534 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3535 if (found_key.objectid != bytenr ||
3536 found_key.type != BTRFS_EXTENT_REF_KEY) {
3537 /* the extent was freed by someone */
3538 if (ref_path->lowest_level == level) {
3542 btrfs_release_path(extent_root, path);
3546 ref = btrfs_item_ptr(leaf, path->slots[0],
3547 struct btrfs_extent_ref);
3548 ref_objectid = btrfs_ref_objectid(leaf, ref);
3549 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3551 level = (int)ref_objectid;
3552 BUG_ON(level >= BTRFS_MAX_LEVEL);
3553 ref_path->lowest_level = level;
3554 ref_path->current_level = level;
3555 ref_path->nodes[level] = bytenr;
3557 WARN_ON(ref_objectid != level);
3560 WARN_ON(level != -1);
3564 if (ref_path->lowest_level == level) {
3565 ref_path->owner_objectid = ref_objectid;
3566 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
3570 * the block is tree root or the block isn't in reference
3573 if (found_key.objectid == found_key.offset ||
3574 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
3575 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
3576 ref_path->root_generation =
3577 btrfs_ref_generation(leaf, ref);
3579 /* special reference from the tree log */
3580 ref_path->nodes[0] = found_key.offset;
3581 ref_path->current_level = 0;
3588 BUG_ON(ref_path->nodes[level] != 0);
3589 ref_path->nodes[level] = found_key.offset;
3590 ref_path->current_level = level;
3593 * the reference was created in the running transaction,
3594 * no need to continue walking up.
3596 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
3597 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
3598 ref_path->root_generation =
3599 btrfs_ref_generation(leaf, ref);
3604 btrfs_release_path(extent_root, path);
3607 /* reached max tree level, but no tree root found. */
3610 btrfs_free_path(path);
3614 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
3615 struct btrfs_root *extent_root,
3616 struct btrfs_ref_path *ref_path,
3619 memset(ref_path, 0, sizeof(*ref_path));
3620 ref_path->extent_start = extent_start;
3622 return __next_ref_path(trans, extent_root, ref_path, 1);
3625 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
3626 struct btrfs_root *extent_root,
3627 struct btrfs_ref_path *ref_path)
3629 return __next_ref_path(trans, extent_root, ref_path, 0);
3632 static int noinline get_new_locations(struct inode *reloc_inode,
3633 struct btrfs_key *extent_key,
3634 u64 offset, int no_fragment,
3635 struct disk_extent **extents,
3638 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
3639 struct btrfs_path *path;
3640 struct btrfs_file_extent_item *fi;
3641 struct extent_buffer *leaf;
3642 struct disk_extent *exts = *extents;
3643 struct btrfs_key found_key;
3648 int max = *nr_extents;
3651 WARN_ON(!no_fragment && *extents);
3654 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
3659 path = btrfs_alloc_path();
3662 cur_pos = extent_key->objectid - offset;
3663 last_byte = extent_key->objectid + extent_key->offset;
3664 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
3674 leaf = path->nodes[0];
3675 nritems = btrfs_header_nritems(leaf);
3676 if (path->slots[0] >= nritems) {
3677 ret = btrfs_next_leaf(root, path);
3682 leaf = path->nodes[0];
3685 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3686 if (found_key.offset != cur_pos ||
3687 found_key.type != BTRFS_EXTENT_DATA_KEY ||
3688 found_key.objectid != reloc_inode->i_ino)
3691 fi = btrfs_item_ptr(leaf, path->slots[0],
3692 struct btrfs_file_extent_item);
3693 if (btrfs_file_extent_type(leaf, fi) !=
3694 BTRFS_FILE_EXTENT_REG ||
3695 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
3699 struct disk_extent *old = exts;
3701 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
3702 memcpy(exts, old, sizeof(*exts) * nr);
3703 if (old != *extents)
3707 exts[nr].disk_bytenr =
3708 btrfs_file_extent_disk_bytenr(leaf, fi);
3709 exts[nr].disk_num_bytes =
3710 btrfs_file_extent_disk_num_bytes(leaf, fi);
3711 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
3712 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
3713 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
3714 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
3715 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
3716 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
3718 BUG_ON(exts[nr].offset > 0);
3719 BUG_ON(exts[nr].compression || exts[nr].encryption);
3720 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
3722 cur_pos += exts[nr].num_bytes;
3725 if (cur_pos + offset >= last_byte)
3735 WARN_ON(cur_pos + offset > last_byte);
3736 if (cur_pos + offset < last_byte) {
3742 btrfs_free_path(path);
3744 if (exts != *extents)
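/*
 * rewrite every file extent item that points at the extent being
 * relocated so it refers to the new copy in new_extents: take extent
 * refs on the new locations, drop the ref on the old one, and keep the
 * affected file range locked while each item is switched over.
 */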
3753 static int noinline replace_one_extent(struct btrfs_trans_handle *trans,
3754 struct btrfs_root *root,
3755 struct btrfs_path *path,
3756 struct btrfs_key *extent_key,
3757 struct btrfs_key *leaf_key,
3758 struct btrfs_ref_path *ref_path,
3759 struct disk_extent *new_extents,
3762 struct extent_buffer *leaf;
3763 struct btrfs_file_extent_item *fi;
3764 struct inode *inode = NULL;
3765 struct btrfs_key key;
3773 int extent_locked = 0;
3777 memcpy(&key, leaf_key, sizeof(key));
3778 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
3779 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3780 if (key.objectid < ref_path->owner_objectid ||
3781 (key.objectid == ref_path->owner_objectid &&
3782 key.type < BTRFS_EXTENT_DATA_KEY)) {
3783 key.objectid = ref_path->owner_objectid;
3784 key.type = BTRFS_EXTENT_DATA_KEY;
3790 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3794 leaf = path->nodes[0];
3795 nritems = btrfs_header_nritems(leaf);
3797 if (extent_locked && ret > 0) {
3799 * the file extent item was modified by someone
3800 * before the extent got locked.
3802 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
3803 lock_end, GFP_NOFS);
3807 if (path->slots[0] >= nritems) {
3808 if (++nr_scaned > 2)
3811 BUG_ON(extent_locked);
3812 ret = btrfs_next_leaf(root, path);
3817 leaf = path->nodes[0];
3818 nritems = btrfs_header_nritems(leaf);
3821 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3823 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3824 if ((key.objectid > ref_path->owner_objectid) ||
3825 (key.objectid == ref_path->owner_objectid &&
3826 key.type > BTRFS_EXTENT_DATA_KEY) ||
3827 (key.offset >= first_pos + extent_key->offset))
3831 if (inode && key.objectid != inode->i_ino) {
3832 BUG_ON(extent_locked);
3833 btrfs_release_path(root, path);
3834 mutex_unlock(&inode->i_mutex);
3840 if (key.type != BTRFS_EXTENT_DATA_KEY) {
3845 fi = btrfs_item_ptr(leaf, path->slots[0],
3846 struct btrfs_file_extent_item);
3847 extent_type = btrfs_file_extent_type(leaf, fi);
3848 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
3849 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
3850 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3851 extent_key->objectid)) {
3857 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
3858 ext_offset = btrfs_file_extent_offset(leaf, fi);
3860 if (first_pos > key.offset - ext_offset)
3861 first_pos = key.offset - ext_offset;
3863 if (!extent_locked) {
3864 lock_start = key.offset;
3865 lock_end = lock_start + num_bytes - 1;
3867 if (lock_start > key.offset ||
3868 lock_end + 1 < key.offset + num_bytes) {
3869 unlock_extent(&BTRFS_I(inode)->io_tree,
3870 lock_start, lock_end, GFP_NOFS);
3876 btrfs_release_path(root, path);
3878 inode = btrfs_iget_locked(root->fs_info->sb,
3879 key.objectid, root);
3880 if (inode->i_state & I_NEW) {
3881 BTRFS_I(inode)->root = root;
3882 BTRFS_I(inode)->location.objectid =
3884 BTRFS_I(inode)->location.type =
3885 BTRFS_INODE_ITEM_KEY;
3886 BTRFS_I(inode)->location.offset = 0;
3887 btrfs_read_locked_inode(inode);
3888 unlock_new_inode(inode);
3891 * some code calls btrfs_commit_transaction while
3892 * holding the i_mutex, so we can't use mutex_lock
3895 if (is_bad_inode(inode) ||
3896 !mutex_trylock(&inode->i_mutex)) {
3899 key.offset = (u64)-1;
3904 if (!extent_locked) {
3905 struct btrfs_ordered_extent *ordered;
3907 btrfs_release_path(root, path);
3909 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
3910 lock_end, GFP_NOFS);
3911 ordered = btrfs_lookup_first_ordered_extent(inode,
3914 ordered->file_offset <= lock_end &&
3915 ordered->file_offset + ordered->len > lock_start) {
3916 unlock_extent(&BTRFS_I(inode)->io_tree,
3917 lock_start, lock_end, GFP_NOFS);
3918 btrfs_start_ordered_extent(inode, ordered, 1);
3919 btrfs_put_ordered_extent(ordered);
3920 key.offset += num_bytes;
3924 btrfs_put_ordered_extent(ordered);
3930 if (nr_extents == 1) {
3931 /* update extent pointer in place */
3932 btrfs_set_file_extent_disk_bytenr(leaf, fi,
3933 new_extents[0].disk_bytenr);
3934 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
3935 new_extents[0].disk_num_bytes);
3936 btrfs_mark_buffer_dirty(leaf);
3938 btrfs_drop_extent_cache(inode, key.offset,
3939 key.offset + num_bytes - 1, 0);
3941 ret = btrfs_inc_extent_ref(trans, root,
3942 new_extents[0].disk_bytenr,
3943 new_extents[0].disk_num_bytes,
3945 root->root_key.objectid,
3950 ret = btrfs_free_extent(trans, root,
3951 extent_key->objectid,
3954 btrfs_header_owner(leaf),
3955 btrfs_header_generation(leaf),
3959 btrfs_release_path(root, path);
3960 key.offset += num_bytes;
3968 * drop the old extent pointer first, then insert the
3969 * new pointers one by one
3971 btrfs_release_path(root, path);
3972 ret = btrfs_drop_extents(trans, root, inode, key.offset,
3973 key.offset + num_bytes,
3974 key.offset, &alloc_hint);
3977 for (i = 0; i < nr_extents; i++) {
3978 if (ext_offset >= new_extents[i].num_bytes) {
3979 ext_offset -= new_extents[i].num_bytes;
3982 extent_len = min(new_extents[i].num_bytes -
3983 ext_offset, num_bytes);
3985 ret = btrfs_insert_empty_item(trans, root,
3990 leaf = path->nodes[0];
3991 fi = btrfs_item_ptr(leaf, path->slots[0],
3992 struct btrfs_file_extent_item);
3993 btrfs_set_file_extent_generation(leaf, fi,
3995 btrfs_set_file_extent_type(leaf, fi,
3996 BTRFS_FILE_EXTENT_REG);
3997 btrfs_set_file_extent_disk_bytenr(leaf, fi,
3998 new_extents[i].disk_bytenr);
3999 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4000 new_extents[i].disk_num_bytes);
4001 btrfs_set_file_extent_ram_bytes(leaf, fi,
4002 new_extents[i].ram_bytes);
4004 btrfs_set_file_extent_compression(leaf, fi,
4005 new_extents[i].compression);
4006 btrfs_set_file_extent_encryption(leaf, fi,
4007 new_extents[i].encryption);
4008 btrfs_set_file_extent_other_encoding(leaf, fi,
4009 new_extents[i].other_encoding);
4011 btrfs_set_file_extent_num_bytes(leaf, fi,
4013 ext_offset += new_extents[i].offset;
4014 btrfs_set_file_extent_offset(leaf, fi,
4016 btrfs_mark_buffer_dirty(leaf);
4018 btrfs_drop_extent_cache(inode, key.offset,
4019 key.offset + extent_len - 1, 0);
4021 ret = btrfs_inc_extent_ref(trans, root,
4022 new_extents[i].disk_bytenr,
4023 new_extents[i].disk_num_bytes,
4025 root->root_key.objectid,
4026 trans->transid, key.objectid);
4028 btrfs_release_path(root, path);
4030 inode_add_bytes(inode, extent_len);
4033 num_bytes -= extent_len;
4034 key.offset += extent_len;
4039 BUG_ON(i >= nr_extents);
4043 if (extent_locked) {
4044 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4045 lock_end, GFP_NOFS);
4049 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4050 key.offset >= first_pos + extent_key->offset)
4057 btrfs_release_path(root, path);
4059 mutex_unlock(&inode->i_mutex);
4060 if (extent_locked) {
4061 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4062 lock_end, GFP_NOFS);
4069 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4070 struct btrfs_root *root,
4071 struct extent_buffer *buf, u64 orig_start)
4076 BUG_ON(btrfs_header_generation(buf) != trans->transid);
4077 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4079 level = btrfs_header_level(buf);
4081 struct btrfs_leaf_ref *ref;
4082 struct btrfs_leaf_ref *orig_ref;
4084 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4088 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4090 btrfs_free_leaf_ref(root, orig_ref);
4094 ref->nritems = orig_ref->nritems;
4095 memcpy(ref->extents, orig_ref->extents,
4096 sizeof(ref->extents[0]) * ref->nritems);
4098 btrfs_free_leaf_ref(root, orig_ref);
4100 ref->root_gen = trans->transid;
4101 ref->bytenr = buf->start;
4102 ref->owner = btrfs_header_owner(buf);
4103 ref->generation = btrfs_header_generation(buf);
4104 ret = btrfs_add_leaf_ref(root, ref, 0);
4106 btrfs_free_leaf_ref(root, ref);
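/*
 * after a relocated leaf has had its file extent pointers rewritten,
 * look up each owning inode in target_root and drop its cached extent
 * mappings for the affected ranges so readers pick up the new
 * locations.
 */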
4111 static int noinline invalidate_extent_cache(struct btrfs_root *root,
4112 struct extent_buffer *leaf,
4113 struct btrfs_block_group_cache *group,
4114 struct btrfs_root *target_root)
4116 struct btrfs_key key;
4117 struct inode *inode = NULL;
4118 struct btrfs_file_extent_item *fi;
4120 u64 skip_objectid = 0;
4124 nritems = btrfs_header_nritems(leaf);
4125 for (i = 0; i < nritems; i++) {
4126 btrfs_item_key_to_cpu(leaf, &key, i);
4127 if (key.objectid == skip_objectid ||
4128 key.type != BTRFS_EXTENT_DATA_KEY)
4130 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4131 if (btrfs_file_extent_type(leaf, fi) ==
4132 BTRFS_FILE_EXTENT_INLINE)
4134 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4136 if (!inode || inode->i_ino != key.objectid) {
4138 inode = btrfs_ilookup(target_root->fs_info->sb,
4139 key.objectid, target_root, 1);
4142 skip_objectid = key.objectid;
4145 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4147 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4148 key.offset + num_bytes - 1, GFP_NOFS);
4149 btrfs_drop_extent_cache(inode, key.offset,
4150 key.offset + num_bytes - 1, 1);
4151 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4152 key.offset + num_bytes - 1, GFP_NOFS);
4159 static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4160 struct btrfs_root *root,
4161 struct extent_buffer *leaf,
4162 struct btrfs_block_group_cache *group,
4163 struct inode *reloc_inode)
4165 struct btrfs_key key;
4166 struct btrfs_key extent_key;
4167 struct btrfs_file_extent_item *fi;
4168 struct btrfs_leaf_ref *ref;
4169 struct disk_extent *new_extent;
4178 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4179 BUG_ON(!new_extent);
4181 ref = btrfs_lookup_leaf_ref(root, leaf->start);
4185 nritems = btrfs_header_nritems(leaf);
4186 for (i = 0; i < nritems; i++) {
4187 btrfs_item_key_to_cpu(leaf, &key, i);
4188 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4190 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4191 if (btrfs_file_extent_type(leaf, fi) ==
4192 BTRFS_FILE_EXTENT_INLINE)
4194 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4195 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4200 if (bytenr >= group->key.objectid + group->key.offset ||
4201 bytenr + num_bytes <= group->key.objectid)
4204 extent_key.objectid = bytenr;
4205 extent_key.offset = num_bytes;
4206 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4208 ret = get_new_locations(reloc_inode, &extent_key,
4209 group->key.objectid, 1,
4210 &new_extent, &nr_extent);
4215 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4216 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4217 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4218 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4220 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4221 new_extent->disk_bytenr);
4222 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4223 new_extent->disk_num_bytes);
4224 btrfs_mark_buffer_dirty(leaf);
4226 ret = btrfs_inc_extent_ref(trans, root,
4227 new_extent->disk_bytenr,
4228 new_extent->disk_num_bytes,
4230 root->root_key.objectid,
4231 trans->transid, key.objectid);
4233 ret = btrfs_free_extent(trans, root,
4234 bytenr, num_bytes, leaf->start,
4235 btrfs_header_owner(leaf),
4236 btrfs_header_generation(leaf),
4242 BUG_ON(ext_index + 1 != ref->nritems);
4243 btrfs_free_leaf_ref(root, ref);
4247 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4248 struct btrfs_root *root)
4250 struct btrfs_root *reloc_root;
4253 if (root->reloc_root) {
4254 reloc_root = root->reloc_root;
4255 root->reloc_root = NULL;
4256 list_add(&reloc_root->dead_list,
4257 &root->fs_info->dead_reloc_roots);
4259 btrfs_set_root_bytenr(&reloc_root->root_item,
4260 reloc_root->node->start);
4261 btrfs_set_root_level(&reloc_root->root_item,
4262 btrfs_header_level(reloc_root->node));
4263 memset(&reloc_root->root_item.drop_progress, 0,
4264 sizeof(struct btrfs_disk_key));
4265 reloc_root->root_item.drop_level = 0;
4267 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4268 &reloc_root->root_key,
4269 &reloc_root->root_item);
4275 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4277 struct btrfs_trans_handle *trans;
4278 struct btrfs_root *reloc_root;
4279 struct btrfs_root *prev_root = NULL;
4280 struct list_head dead_roots;
4284 INIT_LIST_HEAD(&dead_roots);
4285 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4287 while (!list_empty(&dead_roots)) {
4288 reloc_root = list_entry(dead_roots.prev,
4289 struct btrfs_root, dead_list);
4290 list_del_init(&reloc_root->dead_list);
4292 BUG_ON(reloc_root->commit_root != NULL);
4294 trans = btrfs_join_transaction(root, 1);
4297 mutex_lock(&root->fs_info->drop_mutex);
4298 ret = btrfs_drop_snapshot(trans, reloc_root);
4301 mutex_unlock(&root->fs_info->drop_mutex);
4303 nr = trans->blocks_used;
4304 ret = btrfs_end_transaction(trans, root);
4306 btrfs_btree_balance_dirty(root, nr);
4309 free_extent_buffer(reloc_root->node);
4311 ret = btrfs_del_root(trans, root->fs_info->tree_root,
4312 &reloc_root->root_key);
4314 mutex_unlock(&root->fs_info->drop_mutex);
4316 nr = trans->blocks_used;
4317 ret = btrfs_end_transaction(trans, root);
4319 btrfs_btree_balance_dirty(root, nr);
4322 prev_root = reloc_root;
4325 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
4331 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
4333 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
4337 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
4339 struct btrfs_root *reloc_root;
4340 struct btrfs_trans_handle *trans;
4341 struct btrfs_key location;
4345 mutex_lock(&root->fs_info->tree_reloc_mutex);
4346 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
4348 found = !list_empty(&root->fs_info->dead_reloc_roots);
4349 mutex_unlock(&root->fs_info->tree_reloc_mutex);
4352 trans = btrfs_start_transaction(root, 1);
4354 ret = btrfs_commit_transaction(trans, root);
4358 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
4359 location.offset = (u64)-1;
4360 location.type = BTRFS_ROOT_ITEM_KEY;
4362 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
4363 BUG_ON(!reloc_root);
4364 btrfs_orphan_cleanup(reloc_root);
4368 static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
4369 struct btrfs_root *root)
4371 struct btrfs_root *reloc_root;
4372 struct extent_buffer *eb;
4373 struct btrfs_root_item *root_item;
4374 struct btrfs_key root_key;
4377 BUG_ON(!root->ref_cows);
4378 if (root->reloc_root)
4381 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
4384 ret = btrfs_copy_root(trans, root, root->commit_root,
4385 &eb, BTRFS_TREE_RELOC_OBJECTID);
4388 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4389 root_key.offset = root->root_key.objectid;
4390 root_key.type = BTRFS_ROOT_ITEM_KEY;
4392 memcpy(root_item, &root->root_item, sizeof(*root_item));
4393 btrfs_set_root_refs(root_item, 0);
4394 btrfs_set_root_bytenr(root_item, eb->start);
4395 btrfs_set_root_level(root_item, btrfs_header_level(eb));
4396 btrfs_set_root_generation(root_item, trans->transid);
4398 btrfs_tree_unlock(eb);
4399 free_extent_buffer(eb);
4401 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
4402 &root_key, root_item);
4406 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
4408 BUG_ON(!reloc_root);
4409 reloc_root->last_trans = trans->transid;
4410 reloc_root->commit_root = NULL;
4411 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
4413 root->reloc_root = reloc_root;
4418 * Core function of space balance.
4420 * The idea is to use reloc trees to relocate tree blocks in reference
4421 * counted roots. There is one reloc tree for each subvol, and all
4422 * reloc trees share the same root key objectid. Reloc trees are snapshots
4423 * of the latest committed roots of subvols (root->commit_root).
4425 * To relocate a tree block referenced by a subvol, there are two steps.
4426 * COW the block through the subvol's reloc tree, then update the block pointer
4427 * in the subvol to point to the new block. Since all reloc trees share
4428 * the same root key objectid, doing special handling for tree blocks owned
4429 * by them is easy. Once a tree block has been COWed in one reloc tree,
4430 * we can use the resulting new block directly when the same block is
4431 * required to COW again through other reloc trees. In this way, relocated
4432 * tree blocks are shared between reloc trees, so they are also shared
4435 static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
4436 struct btrfs_root *root,
4437 struct btrfs_path *path,
4438 struct btrfs_key *first_key,
4439 struct btrfs_ref_path *ref_path,
4440 struct btrfs_block_group_cache *group,
4441 struct inode *reloc_inode)
4443 struct btrfs_root *reloc_root;
4444 struct extent_buffer *eb = NULL;
4445 struct btrfs_key *keys;
4449 int lowest_level = 0;
4452 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
4453 lowest_level = ref_path->owner_objectid;
4455 if (!root->ref_cows) {
4456 path->lowest_level = lowest_level;
4457 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
4459 path->lowest_level = 0;
4460 btrfs_release_path(root, path);
4464 mutex_lock(&root->fs_info->tree_reloc_mutex);
4465 ret = init_reloc_tree(trans, root);
4467 reloc_root = root->reloc_root;
4469 shared_level = ref_path->shared_level;
4470 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
4472 keys = ref_path->node_keys;
4473 nodes = ref_path->new_nodes;
4474 memset(&keys[shared_level + 1], 0,
4475 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
4476 memset(&nodes[shared_level + 1], 0,
4477 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
4479 if (nodes[lowest_level] == 0) {
4480 path->lowest_level = lowest_level;
4481 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
4484 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
4485 eb = path->nodes[level];
4486 if (!eb || eb == reloc_root->node)
4488 nodes[level] = eb->start;
4490 btrfs_item_key_to_cpu(eb, &keys[level], 0);
4492 btrfs_node_key_to_cpu(eb, &keys[level], 0);
4494 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
4495 eb = path->nodes[0];
4496 ret = replace_extents_in_leaf(trans, reloc_root, eb,
4497 group, reloc_inode);
4500 btrfs_release_path(reloc_root, path);
4502 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
4508 * replace tree blocks in the fs tree with tree blocks in
4511 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
4514 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
4515 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
4518 extent_buffer_get(path->nodes[0]);
4519 eb = path->nodes[0];
4520 btrfs_release_path(reloc_root, path);
4521 ret = invalidate_extent_cache(reloc_root, eb, group, root);
4523 free_extent_buffer(eb);
4526 mutex_unlock(&root->fs_info->tree_reloc_mutex);
4527 path->lowest_level = 0;
4531 static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
4532 struct btrfs_root *root,
4533 struct btrfs_path *path,
4534 struct btrfs_key *first_key,
4535 struct btrfs_ref_path *ref_path)
4539 ret = relocate_one_path(trans, root, path, first_key,
4540 ref_path, NULL, NULL);
4543 if (root == root->fs_info->extent_root)
4544 btrfs_extent_post_op(trans, root);
4549 static int noinline del_extent_zero(struct btrfs_trans_handle *trans,
4550 struct btrfs_root *extent_root,
4551 struct btrfs_path *path,
4552 struct btrfs_key *extent_key)
4556 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
4559 ret = btrfs_del_item(trans, extent_root, path);
4561 btrfs_release_path(extent_root, path);
4565 static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
4566 struct btrfs_ref_path *ref_path)
4568 struct btrfs_key root_key;
4570 root_key.objectid = ref_path->root_objectid;
4571 root_key.type = BTRFS_ROOT_ITEM_KEY;
4572 if (is_cowonly_root(ref_path->root_objectid))
4573 root_key.offset = 0;
4575 root_key.offset = (u64)-1;
4577 return btrfs_read_fs_root_no_name(fs_info, &root_key);
4580 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
4581 struct btrfs_path *path,
4582 struct btrfs_key *extent_key,
4583 struct btrfs_block_group_cache *group,
4584 struct inode *reloc_inode, int pass)
4586 struct btrfs_trans_handle *trans;
4587 struct btrfs_root *found_root;
4588 struct btrfs_ref_path *ref_path = NULL;
4589 struct disk_extent *new_extents = NULL;
4594 struct btrfs_key first_key;
4598 trans = btrfs_start_transaction(extent_root, 1);
4601 if (extent_key->objectid == 0) {
4602 ret = del_extent_zero(trans, extent_root, path, extent_key);
4606 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
4612 for (loops = 0; ; loops++) {
4614 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
4615 extent_key->objectid);
4617 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
4624 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4625 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
4628 found_root = read_ref_root(extent_root->fs_info, ref_path);
4629 BUG_ON(!found_root);
4631 * for reference counted trees, only process reference paths
4632 * rooted at the latest committed root.
4634 if (found_root->ref_cows &&
4635 ref_path->root_generation != found_root->root_key.offset)
4638 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
4641 * copy data extents to new locations
4643 u64 group_start = group->key.objectid;
4644 ret = relocate_data_extent(reloc_inode,
4653 level = ref_path->owner_objectid;
4656 if (prev_block != ref_path->nodes[level]) {
4657 struct extent_buffer *eb;
4658 u64 block_start = ref_path->nodes[level];
4659 u64 block_size = btrfs_level_size(found_root, level);
4661 eb = read_tree_block(found_root, block_start,
4663 btrfs_tree_lock(eb);
4664 BUG_ON(level != btrfs_header_level(eb));
4667 btrfs_item_key_to_cpu(eb, &first_key, 0);
4669 btrfs_node_key_to_cpu(eb, &first_key, 0);
4671 btrfs_tree_unlock(eb);
4672 free_extent_buffer(eb);
4673 prev_block = block_start;
4676 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
4679 * use fallback method to process the remaining
4683 u64 group_start = group->key.objectid;
4684 new_extents = kmalloc(sizeof(*new_extents),
4687 ret = get_new_locations(reloc_inode,
4695 btrfs_record_root_in_trans(found_root);
4696 ret = replace_one_extent(trans, found_root,
4698 &first_key, ref_path,
4699 new_extents, nr_extents);
4705 btrfs_record_root_in_trans(found_root);
4706 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4707 ret = relocate_tree_block(trans, found_root, path,
4708 &first_key, ref_path);
4711 * try to update data extent references while
4712 * keeping metadata shared between snapshots.
4714 ret = relocate_one_path(trans, found_root, path,
4715 &first_key, ref_path,
4716 group, reloc_inode);
4723 btrfs_end_transaction(trans, extent_root);
4729 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
4732 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
4733 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
4735 num_devices = root->fs_info->fs_devices->num_devices;
4736 if (num_devices == 1) {
4737 stripped |= BTRFS_BLOCK_GROUP_DUP;
4738 stripped = flags & ~stripped;
4740 /* turn raid0 into single device chunks */
4741 if (flags & BTRFS_BLOCK_GROUP_RAID0)
4744 /* turn mirroring into duplication */
4745 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
4746 BTRFS_BLOCK_GROUP_RAID10))
4747 return stripped | BTRFS_BLOCK_GROUP_DUP;
4750 /* they already had raid on here, just return */
4751 if (flags & stripped)
4754 stripped |= BTRFS_BLOCK_GROUP_DUP;
4755 stripped = flags & ~stripped;
4757 /* switch duplicated blocks with raid1 */
4758 if (flags & BTRFS_BLOCK_GROUP_DUP)
4759 return stripped | BTRFS_BLOCK_GROUP_RAID1;
4761 /* turn single device chunks into raid0 */
4762 return stripped | BTRFS_BLOCK_GROUP_RAID0;
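/*
 * before emptying a block group, make sure its data has somewhere to
 * go: if the group still has used bytes, allocate a replacement chunk
 * with the profile update_block_group_flags picked for the current
 * device count.
 */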
4767 int __alloc_chunk_for_shrink(struct btrfs_root *root,
4768 struct btrfs_block_group_cache *shrink_block_group,
4771 struct btrfs_trans_handle *trans;
4772 u64 new_alloc_flags;
4775 spin_lock(&shrink_block_group->lock);
4776 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
4777 spin_unlock(&shrink_block_group->lock);
4779 trans = btrfs_start_transaction(root, 1);
4780 spin_lock(&shrink_block_group->lock);
4782 new_alloc_flags = update_block_group_flags(root,
4783 shrink_block_group->flags);
4784 if (new_alloc_flags != shrink_block_group->flags) {
4786 btrfs_block_group_used(&shrink_block_group->item);
4788 calc = shrink_block_group->key.offset;
4790 spin_unlock(&shrink_block_group->lock);
4792 do_chunk_alloc(trans, root->fs_info->extent_root,
4793 calc + 2 * 1024 * 1024, new_alloc_flags, force);
4795 btrfs_end_transaction(trans, root);
4797 spin_unlock(&shrink_block_group->lock);
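/*
 * insert a bare inode item (no directory entry) for the relocation
 * inode: a regular file sized to the block group, flagged NODATASUM
 * and NOCOMPRESS so the relocated bytes pass through it unchanged.
 */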
4801 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
4802 struct btrfs_root *root,
4803 u64 objectid, u64 size)
4805 struct btrfs_path *path;
4806 struct btrfs_inode_item *item;
4807 struct extent_buffer *leaf;
4810 path = btrfs_alloc_path();
4814 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
4818 leaf = path->nodes[0];
4819 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4820 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
4821 btrfs_set_inode_generation(leaf, item, 1);
4822 btrfs_set_inode_size(leaf, item, size);
4823 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
4824 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM |
4825 BTRFS_INODE_NOCOMPRESS);
4826 btrfs_mark_buffer_dirty(leaf);
4827 btrfs_release_path(root, path);
4829 btrfs_free_path(path);
4833 static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info,
4834 struct btrfs_block_group_cache *group)
4836 struct inode *inode = NULL;
4837 struct btrfs_trans_handle *trans;
4838 struct btrfs_root *root;
4839 struct btrfs_key root_key;
4840 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
4843 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
4844 root_key.type = BTRFS_ROOT_ITEM_KEY;
4845 root_key.offset = (u64)-1;
4846 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
4848 return ERR_CAST(root);
4850 trans = btrfs_start_transaction(root, 1);
4853 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
4857 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
4860 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
4861 group->key.offset, 0, group->key.offset,
4865 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
4866 if (inode->i_state & I_NEW) {
4867 BTRFS_I(inode)->root = root;
4868 BTRFS_I(inode)->location.objectid = objectid;
4869 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
4870 BTRFS_I(inode)->location.offset = 0;
4871 btrfs_read_locked_inode(inode);
4872 unlock_new_inode(inode);
4873 BUG_ON(is_bad_inode(inode));
4878 err = btrfs_orphan_add(trans, inode);
4880 btrfs_end_transaction(trans, root);
4884 inode = ERR_PTR(err);
4889 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
4891 struct btrfs_trans_handle *trans;
4892 struct btrfs_path *path;
4893 struct btrfs_fs_info *info = root->fs_info;
4894 struct extent_buffer *leaf;
4895 struct inode *reloc_inode;
4896 struct btrfs_block_group_cache *block_group;
4897 struct btrfs_key key;
4906 root = root->fs_info->extent_root;
4908 block_group = btrfs_lookup_block_group(info, group_start);
4909 BUG_ON(!block_group);
4911 printk("btrfs relocating block group %llu flags %llu\n",
4912 (unsigned long long)block_group->key.objectid,
4913 (unsigned long long)block_group->flags);
4915 path = btrfs_alloc_path();
4918 reloc_inode = create_reloc_inode(info, block_group);
4919 BUG_ON(IS_ERR(reloc_inode));
4921 __alloc_chunk_for_shrink(root, block_group, 1);
4922 block_group->ro = 1;
4923 block_group->space_info->total_bytes -= block_group->key.offset;
4925 btrfs_start_delalloc_inodes(info->tree_root);
4926 btrfs_wait_ordered_extents(info->tree_root, 0);
4931 key.objectid = block_group->key.objectid;
4934 cur_byte = key.objectid;
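/*
 * each pass commits the running transaction and runs the cleaner so
 * previously relocated extents are on disk and dead reloc roots are
 * dropped, then rescans the block group for extent items that still
 * remain.
 */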
4936 trans = btrfs_start_transaction(info->tree_root, 1);
4937 btrfs_commit_transaction(trans, info->tree_root);
4939 mutex_lock(&root->fs_info->cleaner_mutex);
4940 btrfs_clean_old_snapshots(info->tree_root);
4941 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
4942 mutex_unlock(&root->fs_info->cleaner_mutex);
        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;
next:
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;
                        if (ret == 1) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (progress && need_resched()) {
                        btrfs_release_path(root, path);
                        cond_resched();
                        progress = 0;
                        continue;
                }
                progress = 1;

                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
                    key.objectid + key.offset <= cur_byte) {
                        path->slots[0]++;
                        goto next;
                }

                total_found++;
                cur_byte = key.objectid + key.offset;
                btrfs_release_path(root, path);

                __alloc_chunk_for_shrink(root, block_group, 0);
                ret = relocate_one_extent(root, path, &key, block_group,
                                          reloc_inode, pass);
                BUG_ON(ret < 0);
                if (ret > 0)
                        skipped++;

                key.objectid = cur_byte;
                key.type = 0;
                key.offset = 0;
        }

        btrfs_release_path(root, path);

        if (pass == 0) {
                btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
                invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
                WARN_ON(reloc_inode->i_mapping->nrpages);
        }

        if (total_found > 0) {
                printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
                       (unsigned long long)total_found, pass);
                pass++;
                if (total_found == skipped && pass > 2) {
                        iput(reloc_inode);
                        reloc_inode = create_reloc_inode(info, block_group);
                        pass = 0;
                }
                goto again;
        }

        /* delete reloc_inode */
        iput(reloc_inode);

        /* unpin extents in this range */
        trans = btrfs_start_transaction(info->tree_root, 1);
        btrfs_commit_transaction(trans, info->tree_root);

        spin_lock(&block_group->lock);
        WARN_ON(block_group->pinned > 0);
        WARN_ON(block_group->reserved > 0);
        WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
        spin_unlock(&block_group->lock);
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
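
/*
 * Position the path at the first BLOCK_GROUP_ITEM whose objectid is at or
 * after key->objectid.  Returns 0 when one is found, a positive value once
 * the extent tree holds no more block group items, or a negative errno.
 */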
int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
                           struct btrfs_key *key)
{
        int ret = 0;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int slot;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.objectid >= key->objectid &&
                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        ret = 0;
                        goto out;
                }
                path->slots[0]++;
        }
out:
        return ret;
}
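
/*
 * Drop every in-memory block group cache: unlink each group from the rbtree
 * and from its space_info list, release its free space cache, and free it.
 */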
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *block_group;
        struct rb_node *n;

        spin_lock(&info->block_group_cache_lock);
        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
                block_group = rb_entry(n, struct btrfs_block_group_cache,
                                       cache_node);
                rb_erase(&block_group->cache_node,
                         &info->block_group_cache_tree);
                spin_unlock(&info->block_group_cache_lock);

                btrfs_remove_free_space_cache(block_group);
                down_write(&block_group->space_info->groups_sem);
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);
                kfree(block_group);

                spin_lock(&info->block_group_cache_lock);
        }
        spin_unlock(&info->block_group_cache_lock);
        return 0;
}
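
/*
 * Read every block group item out of the extent tree and rebuild the
 * in-memory block group caches and the per-type space_info accounting.
 */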
int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;

        root = info->extent_root;
        key.objectid = 0;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = find_first_block_group(root, path, &key);
                if (ret > 0) {
                        ret = 0;
                        goto error;
                }
                if (ret != 0)
                        goto error;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                cache = kzalloc(sizeof(*cache), GFP_NOFS);
                if (!cache) {
                        ret = -ENOMEM;
                        break;
                }

                spin_lock_init(&cache->lock);
                mutex_init(&cache->alloc_mutex);
                INIT_LIST_HEAD(&cache->list);
                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
                memcpy(&cache->key, &found_key, sizeof(found_key));

                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, path);
                cache->flags = btrfs_block_group_flags(&cache->item);
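
                /*
                 * Account this group's space against the matching
                 * space_info (created on first use), then link the group
                 * into that space_info and into the fs-wide rbtree.
                 */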
                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
                                        &space_info);
                BUG_ON(ret);
                cache->space_info = space_info;
                down_write(&space_info->groups_sem);
                list_add_tail(&cache->list, &space_info->block_groups);
                up_write(&space_info->groups_sem);

                ret = btrfs_add_block_group_cache(root->fs_info, cache);
                BUG_ON(ret);

                set_avail_alloc_bits(root->fs_info, cache->flags);
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
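
/*
 * Create a new block group for a freshly allocated chunk: build the
 * in-memory cache, hook it into the space accounting, and insert the block
 * group item into the extent tree.
 */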
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,
                           u64 size)
{
        int ret;
        struct btrfs_root *extent_root;
        struct btrfs_block_group_cache *cache;

        extent_root = root->fs_info->extent_root;

        root->fs_info->last_trans_new_blockgroup = trans->transid;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return -ENOMEM;

        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        spin_lock_init(&cache->lock);
        mutex_init(&cache->alloc_mutex);
        INIT_LIST_HEAD(&cache->list);
        btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
        btrfs_set_block_group_flags(&cache->item, type);
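
        /*
         * Publish the new group: account its space, link it into its
         * space_info and the block group rbtree, insert the item into the
         * extent tree, and flush any extent work queued by that insert.
         */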
        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);
        down_write(&cache->space_info->groups_sem);
        list_add_tail(&cache->list, &cache->space_info->block_groups);
        up_write(&cache->space_info->groups_sem);

        ret = btrfs_add_block_group_cache(root->fs_info, cache);
        BUG_ON(ret);

        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                                sizeof(cache->item));
        BUG_ON(ret);

        finish_current_insert(trans, extent_root, 0);
        ret = del_pending_extents(trans, extent_root, 0);
        BUG_ON(ret);
        set_avail_alloc_bits(extent_root->fs_info, type);

        return 0;
}
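
/*
 * Remove a block group that relocation has already emptied: unhook it from
 * the in-memory caches and delete its block group item from the extent tree.
 */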
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
{
        struct btrfs_path *path;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_key key;
        int ret;

        root = root->fs_info->extent_root;

        block_group = btrfs_lookup_block_group(root->fs_info, group_start);
        BUG_ON(!block_group);

        memcpy(&key, &block_group->key, sizeof(key));

        path = btrfs_alloc_path();
        BUG_ON(!path);

        btrfs_remove_free_space_cache(block_group);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
        down_write(&block_group->space_info->groups_sem);
        list_del(&block_group->list);
        up_write(&block_group->space_info->groups_sem);

        /*
        memset(shrink_block_group, 0, sizeof(*shrink_block_group));
        kfree(shrink_block_group);
        */

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);