#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/sort.h>
+#include "compat.h"
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
int del;
};
-static int finish_current_insert(struct btrfs_trans_handle *trans, struct
- btrfs_root *extent_root, int all);
-static int del_pending_extents(struct btrfs_trans_handle *trans, struct
- btrfs_root *extent_root, int all);
-static struct btrfs_block_group_cache *
-__btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache *hint,
- u64 search_start, int data, int owner);
+static int finish_current_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, int all);
+static int del_pending_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, int all);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int is_data);
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
-int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
break;
}
}
+ if (ret)
+ atomic_inc(&ret->count);
spin_unlock(&info->block_group_cache_lock);
return ret;
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
- ret = btrfs_add_free_space_lock(block_group, start,
- size);
+ ret = btrfs_add_free_space(block_group, start,
+ size);
BUG_ON(ret);
start = extent_end + 1;
} else {
if (start < end) {
size = end - start;
- ret = btrfs_add_free_space_lock(block_group, start, size);
+ ret = btrfs_add_free_space(block_group, start, size);
BUG_ON(ret);
}
mutex_unlock(&info->pinned_mutex);
return 0;
}
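+
+/*
+ * the super block mirrors live inside regular block groups, so their
+ * byte ranges have to be carved back out of the free space cache or
+ * the allocator could hand them out as ordinary space
+ */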
+static int remove_sb_from_cache(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ u64 bytenr;
+ u64 *logical;
+ int stripe_len;
+ int i, nr, ret;
+
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+ cache->key.objectid, bytenr, 0,
+ &logical, &nr, &stripe_len);
+ BUG_ON(ret);
+ while (nr--) {
+ btrfs_remove_free_space(cache, logical[nr],
+ stripe_len);
+ }
+ kfree(logical);
+ }
+ return 0;
+}
+
static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
struct btrfs_key key;
struct extent_buffer *leaf;
int slot;
- u64 last = 0;
- u64 first_free;
- int found = 0;
+ u64 last;
if (!block_group)
return 0;
* skip the locking here
*/
path->skip_locking = 1;
- first_free = max_t(u64, block_group->key.objectid,
- BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
- key.objectid = block_group->key.objectid;
+ last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+ key.objectid = last;
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
- if (ret < 0)
- goto err;
- if (ret == 0) {
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid + key.offset > first_free)
- first_free = key.objectid + key.offset;
- }
- while(1) {
+
+ while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
break;
if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
- if (!found) {
- last = first_free;
- found = 1;
- }
-
add_new_free_space(block_group, root->fs_info, last,
key.objectid);
path->slots[0]++;
}
- if (!found)
- last = first_free;
-
add_new_free_space(block_group, root->fs_info, last,
block_group->key.objectid +
block_group->key.offset);
+ remove_sb_from_cache(root, block_group);
block_group->cached = 1;
ret = 0;
err:
/*
* return the block group that starts at or after bytenr
*/
-struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
- btrfs_fs_info *info,
- u64 bytenr)
+static struct btrfs_block_group_cache *
+btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
struct btrfs_block_group_cache *cache;
/*
 * return the block group that contains the given bytenr
*/
-struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
- btrfs_fs_info *info,
- u64 bytenr)
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+ struct btrfs_fs_info *info,
+ u64 bytenr)
{
struct btrfs_block_group_cache *cache;
return cache;
}
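+
+/*
+ * block group cache structs are reference counted: the rb tree search
+ * takes a reference on every hit, and each user drops it again with
+ * put_block_group() when the struct is no longer needed
+ */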
+static inline void put_block_group(struct btrfs_block_group_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->count))
+ kfree(cache);
+}
+
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
struct list_head *head = &info->space_info;
- struct list_head *cur;
struct btrfs_space_info *found;
- list_for_each(cur, head) {
- found = list_entry(cur, struct btrfs_space_info, list);
+ list_for_each_entry(found, head, list) {
if (found->flags == flags)
return found;
}
return num;
}
-static struct btrfs_block_group_cache *
-__btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache *hint,
- u64 search_start, int data, int owner)
+u64 btrfs_find_block_group(struct btrfs_root *root,
+ u64 search_start, u64 search_hint, int owner)
{
struct btrfs_block_group_cache *cache;
- struct btrfs_block_group_cache *found_group = NULL;
- struct btrfs_fs_info *info = root->fs_info;
u64 used;
- u64 last = 0;
- u64 free_check;
+ u64 last = max(search_hint, search_start);
+ u64 group_start = 0;
int full_search = 0;
- int factor = 10;
+ int factor = 9;
int wrapped = 0;
-
- if (data & BTRFS_BLOCK_GROUP_METADATA)
- factor = 9;
-
- if (search_start) {
- struct btrfs_block_group_cache *shint;
- shint = btrfs_lookup_first_block_group(info, search_start);
- if (shint && block_group_bits(shint, data)) {
- spin_lock(&shint->lock);
- used = btrfs_block_group_used(&shint->item);
- if (used + shint->pinned + shint->reserved <
- div_factor(shint->key.offset, factor)) {
- spin_unlock(&shint->lock);
- return shint;
- }
- spin_unlock(&shint->lock);
- }
- }
- if (hint && block_group_bits(hint, data)) {
- spin_lock(&hint->lock);
- used = btrfs_block_group_used(&hint->item);
- if (used + hint->pinned + hint->reserved <
- div_factor(hint->key.offset, factor)) {
- spin_unlock(&hint->lock);
- return hint;
- }
- spin_unlock(&hint->lock);
- last = hint->key.objectid + hint->key.offset;
- } else {
- if (hint)
- last = max(hint->key.objectid, search_start);
- else
- last = search_start;
- }
again:
while (1) {
cache = btrfs_lookup_first_block_group(root->fs_info, last);
last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item);
- if (block_group_bits(cache, data)) {
- free_check = div_factor(cache->key.offset, factor);
+ if ((full_search || !cache->ro) &&
+ block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
if (used + cache->pinned + cache->reserved <
- free_check) {
- found_group = cache;
+ div_factor(cache->key.offset, factor)) {
+ group_start = cache->key.objectid;
spin_unlock(&cache->lock);
+ put_block_group(cache);
goto found;
}
}
spin_unlock(&cache->lock);
+ put_block_group(cache);
cond_resched();
}
if (!wrapped) {
goto again;
}
found:
- return found_group;
-}
-
-struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache
- *hint, u64 search_start,
- int data, int owner)
-{
-
- struct btrfs_block_group_cache *ret;
- ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
- return ret;
+ return group_start;
}
/* simple helper to search for an existing extent at a given offset */
* to the key objectid.
*/
-static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
+static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent,
* updates all the backrefs that are pending on update_list for the
* extent_root
*/
-static int noinline update_backrefs(struct btrfs_trans_handle *trans,
+static noinline int update_backrefs(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct btrfs_path *path,
struct list_head *update_list)
btrfs_ref_generation(leaf, ref) != op->orig_generation ||
(ref_objectid != op->level &&
ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
- printk(KERN_ERR "couldn't find %Lu, parent %Lu, root %Lu, "
- "owner %u\n", op->bytenr, op->orig_parent,
- ref_root, op->level);
+ printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
+ "root %llu, owner %u\n",
+ (unsigned long long)op->bytenr,
+ (unsigned long long)op->orig_parent,
+ (unsigned long long)ref_root, op->level);
btrfs_print_leaf(extent_root, leaf);
BUG();
}
return 0;
}
-static int noinline insert_extents(struct btrfs_trans_handle *trans,
+static noinline int insert_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct btrfs_path *path,
struct list_head *insert_list, int nr)
*/
i = last;
last = 0;
- cur = insert_list->next;
- op = list_entry(cur, struct pending_extent_op, list);
total--;
+ if (i < total) {
+ cur = insert_list->next;
+ op = list_entry(cur, struct pending_extent_op,
+ list);
+ }
} else {
i += ret;
}
return ret;
}
-static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
+static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent,
return ret;
}
-static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
+static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path)
{
return ret;
}
-static int noinline free_extents(struct btrfs_trans_handle *trans,
+#ifdef BIO_RW_DISCARD
+static void btrfs_issue_discard(struct block_device *bdev,
+ u64 start, u64 len)
+{
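+	/* the block layer counts in 512 byte sectors, hence the shift by 9 */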
+ blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
+}
+#endif
+
+static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes)
+{
+#ifdef BIO_RW_DISCARD
+ int ret;
+ u64 map_length = num_bytes;
+ struct btrfs_multi_bio *multi = NULL;
+
+ /* Tell the block device(s) that the sectors can be discarded */
+ ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
+ bytenr, &map_length, &multi, 0);
+ if (!ret) {
+ struct btrfs_bio_stripe *stripe = multi->stripes;
+ int i;
+
+ if (map_length > num_bytes)
+ map_length = num_bytes;
+
+ for (i = 0; i < multi->num_stripes; i++, stripe++) {
+ btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ map_length);
+ }
+ kfree(multi);
+ }
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+static noinline int free_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct list_head *del_list)
{
extent_root->root_key.objectid,
op->orig_generation, op->level, 1);
if (ret) {
- printk("Unable to find backref byte nr %Lu root %Lu gen %Lu "
- "owner %u\n", op->bytenr,
- extent_root->root_key.objectid, op->orig_generation,
- op->level);
+ printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
+ "root %llu gen %llu owner %u\n",
+ (unsigned long long)op->bytenr,
+ (unsigned long long)extent_root->root_key.objectid,
+ (unsigned long long)op->orig_generation, op->level);
btrfs_print_leaf(extent_root, path->nodes[0]);
WARN_ON(1);
goto out;
end = pos;
/* update the free space counters */
- spin_lock_irq(&info->delalloc_lock);
+ spin_lock(&info->delalloc_lock);
super_used = btrfs_super_bytes_used(&info->super_copy);
btrfs_set_super_bytes_used(&info->super_copy,
super_used - bytes_freed);
- spin_unlock_irq(&info->delalloc_lock);
root_used = btrfs_root_used(&extent_root->root_item);
btrfs_set_root_used(&extent_root->root_item,
root_used - bytes_freed);
+ spin_unlock(&info->delalloc_lock);
/* delete the items */
ret = btrfs_del_items(trans, extent_root, path,
for (pos = cur, n = pos->next; pos != end;
pos = n, n = pos->next) {
struct pending_extent_op *tmp;
-#ifdef BIO_RW_DISCARD
- u64 map_length;
- struct btrfs_multi_bio *multi = NULL;
-#endif
tmp = list_entry(pos, struct pending_extent_op, list);
/*
tmp->del);
BUG_ON(ret);
-#ifdef BIO_RW_DISCARD
- ret = btrfs_map_block(&info->mapping_tree, READ,
- tmp->bytenr, &map_length, &multi,
- 0);
- if (!ret) {
- struct btrfs_bio_stripe *stripe;
- int i;
-
- stripe = multi->stripe;
-
- if (map_length > tmp->num_bytes)
- map_length = tmp->num_bytes;
-
- for (i = 0; i < multi->num_stripes;
- i++, stripe++)
- blkdev_issue_discard(stripe->dev->bdev,
- stripe->physical >> 9,
- map_length >> 9);
- kfree(multi);
- }
-#endif
list_del_init(&tmp->list);
unlock_extent(&info->extent_ins, tmp->bytenr,
tmp->bytenr + tmp->num_bytes - 1,
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != bytenr) {
btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
- printk("wanted %Lu found %Lu\n", bytenr, key.objectid);
+ printk(KERN_ERR "btrfs wanted %llu found %llu\n",
+ (unsigned long long)bytenr,
+ (unsigned long long)key.objectid);
BUG();
}
BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- finish_current_insert(trans, root->fs_info->extent_root, 1);
- del_pending_extents(trans, root->fs_info->extent_root, 1);
+ u64 start;
+ u64 end;
+ int ret;
+
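+	/*
+	 * finishing pending inserts can queue up new deletes, and
+	 * processing deletes can queue up new inserts, so loop until
+	 * both pending trees are really empty
+	 */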
+	while (1) {
+ finish_current_insert(trans, root->fs_info->extent_root, 1);
+ del_pending_extents(trans, root->fs_info->extent_root, 1);
+
+ /* is there more work to do? */
+ ret = find_first_extent_bit(&root->fs_info->pending_del,
+ 0, &start, &end, EXTENT_WRITEBACK);
+ if (!ret)
+ continue;
+ ret = find_first_extent_bit(&root->fs_info->extent_ins,
+ 0, &start, &end, EXTENT_WRITEBACK);
+ if (!ret)
+ continue;
+ break;
+ }
return 0;
}
goto out;
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
- printk("failed to find block number %Lu\n", bytenr);
+ printk(KERN_INFO "btrfs failed to find block number %llu\n",
+ (unsigned long long)bytenr);
BUG();
}
l = path->nodes[0];
}
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytenr)
+ struct btrfs_root *root, u64 objectid, u64 bytenr)
{
struct btrfs_root *extent_root = root->fs_info->extent_root;
struct btrfs_path *path;
ref_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_ref);
ref_root = btrfs_ref_root(leaf, ref_item);
- if (ref_root != root->root_key.objectid &&
- ref_root != BTRFS_TREE_LOG_OBJECTID) {
+ if ((ref_root != root->root_key.objectid &&
+ ref_root != BTRFS_TREE_LOG_OBJECTID) ||
+ objectid != btrfs_ref_objectid(leaf, ref_item)) {
ret = 1;
goto out;
}
return ret;
}
-int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *orig_buf, struct extent_buffer *buf,
- u32 *nr_extents)
+/* when a block goes through cow, we update the reference counts of
+ * everything that block points to. The internal pointers of the block
+ * can be in just about any order, and it is likely to have clusters of
+ * things that are close together and clusters of things that are not.
+ *
+ * To help reduce the seeks that come with updating all of these reference
+ * counts, sort them by byte number before actual updates are done.
+ *
+ * struct refsort is used to match byte number to slot in the btree block.
+ * we sort based on the byte number and then use the slot to actually
+ * find the item.
+ *
+ * struct refsort is smaller than struct btrfs_item and smaller than
+ * struct btrfs_key_ptr. Since we're currently limited to the page size
+ * for a btree block, there's no way for a kmalloc of refsorts for a
+ * single node to be bigger than a page.
+ */
+struct refsort {
+ u64 bytenr;
+ u32 slot;
+};
+
+/*
+ * for passing into sort()
+ */
+static int refsort_cmp(const void *a_void, const void *b_void)
+{
+ const struct refsort *a = a_void;
+ const struct refsort *b = b_void;
+
+ if (a->bytenr < b->bytenr)
+ return -1;
+ if (a->bytenr > b->bytenr)
+ return 1;
+ return 0;
+}
+
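+/*
+ * illustrative sketch (not real kernel code) of the two pass pattern
+ * the functions below follow: record (bytenr, slot) pairs, sort them
+ * with refsort_cmp, then do the expensive work in bytenr order:
+ *
+ *	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+ *	for (i = 0; i < nritems; i++) {
+ *		sorted[i].bytenr = btrfs_node_blockptr(buf, i);
+ *		sorted[i].slot = i;
+ *	}
+ *	sort(sorted, nritems, sizeof(struct refsort), refsort_cmp, NULL);
+ *	for (i = 0; i < nritems; i++)
+ *		update_one_ref(sorted[i].bytenr, sorted[i].slot);
+ *	kfree(sorted);
+ *
+ * update_one_ref() stands in for whatever per-reference work the
+ * caller needs to do.
+ */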
+noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *orig_buf,
+ struct extent_buffer *buf, u32 *nr_extents)
{
u64 bytenr;
u64 ref_root;
u64 orig_root;
u64 ref_generation;
u64 orig_generation;
+ struct refsort *sorted;
u32 nritems;
u32 nr_file_extents = 0;
struct btrfs_key key;
int level;
int ret = 0;
int faili = 0;
+ int refi = 0;
+ int slot;
int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
u64, u64, u64, u64, u64, u64, u64, u64);
nritems = btrfs_header_nritems(buf);
level = btrfs_header_level(buf);
+ sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
+ BUG_ON(!sorted);
+
if (root->ref_cows) {
process_func = __btrfs_inc_extent_ref;
} else {
process_func = __btrfs_update_extent_ref;
}
+ /*
+ * we make two passes through the items. In the first pass we
+ * only record the byte number and slot. Then we sort based on
+ * byte number and do the actual work based on the sorted results
+ */
for (i = 0; i < nritems; i++) {
cond_resched();
if (level == 0) {
continue;
nr_file_extents++;
+ sorted[refi].bytenr = bytenr;
+ sorted[refi].slot = i;
+ refi++;
+ } else {
+ bytenr = btrfs_node_blockptr(buf, i);
+ sorted[refi].bytenr = bytenr;
+ sorted[refi].slot = i;
+ refi++;
+ }
+ }
+ /*
+ * if refi == 0, we didn't actually put anything into the sorted
+ * array and we're done
+ */
+ if (refi == 0)
+ goto out;
+
+ sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+ for (i = 0; i < refi; i++) {
+ cond_resched();
+ slot = sorted[i].slot;
+ bytenr = sorted[i].bytenr;
+
+ if (level == 0) {
+ btrfs_item_key_to_cpu(buf, &key, slot);
ret = process_func(trans, root, bytenr,
orig_buf->start, buf->start,
key.objectid);
if (ret) {
- faili = i;
+ faili = slot;
WARN_ON(1);
goto fail;
}
} else {
- bytenr = btrfs_node_blockptr(buf, i);
ret = process_func(trans, root, bytenr,
orig_buf->start, buf->start,
orig_root, ref_root,
orig_generation, ref_generation,
level - 1);
if (ret) {
- faili = i;
+ faili = slot;
WARN_ON(1);
goto fail;
}
}
}
out:
+ kfree(sorted);
if (nr_extents) {
if (level == 0)
*nr_extents = nr_file_extents;
}
return 0;
fail:
+ kfree(sorted);
WARN_ON(1);
return ret;
}
if (!path)
return -ENOMEM;
- while(1) {
+ while (1) {
cache = NULL;
spin_lock(&root->fs_info->block_group_cache_lock);
for (n = rb_first(&root->fs_info->block_group_cache_tree);
return werr;
}
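+
+/*
+ * returns 1 if the extent lives in a block group that is missing or
+ * marked read-only
+ */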
+int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
+{
+ struct btrfs_block_group_cache *block_group;
+ int readonly = 0;
+
+ block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+ if (!block_group || block_group->ro)
+ readonly = 1;
+ if (block_group)
+ put_block_group(block_group);
+ return readonly;
+}
+
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
struct btrfs_space_info **space_info)
spin_unlock(&space_info->lock);
ret = btrfs_alloc_chunk(trans, extent_root, flags);
- if (ret) {
-printk("space info full %Lu\n", flags);
+ if (ret)
space_info->full = 1;
- }
out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
u64 old_val;
u64 byte_in_group;
- while(total) {
+ while (total) {
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache)
return -1;
if (alloc) {
old_val += num_bytes;
cache->space_info->bytes_used += num_bytes;
- if (cache->ro) {
+ if (cache->ro)
cache->space_info->bytes_readonly -= num_bytes;
- WARN_ON(1);
- }
btrfs_set_block_group_used(&cache->item, old_val);
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
spin_unlock(&cache->space_info->lock);
if (mark_free) {
int ret;
+
+ ret = btrfs_discard_extent(root, bytenr,
+ num_bytes);
+ WARN_ON(ret);
+
ret = btrfs_add_free_space(cache, bytenr,
num_bytes);
- if (ret)
- return -1;
+ WARN_ON(ret);
}
}
+ put_block_group(cache);
total -= num_bytes;
bytenr += num_bytes;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
struct btrfs_block_group_cache *cache;
+ u64 bytenr;
cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
if (!cache)
return 0;
- return cache->key.objectid;
+ bytenr = cache->key.objectid;
+ put_block_group(cache);
+
+ return bytenr;
}
int btrfs_update_pinned_extents(struct btrfs_root *root,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
fs_info->total_pinned -= len;
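+		/*
+		 * only a cached group has a populated free space tree;
+		 * uncached groups pick this range up when they are scanned
+		 */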
+ if (cache->cached)
+ btrfs_add_free_space(cache, bytenr, len);
}
+ put_block_group(cache);
bytenr += len;
num -= len;
}
}
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ put_block_group(cache);
bytenr += len;
num -= len;
}
int ret;
mutex_lock(&root->fs_info->pinned_mutex);
- while(1) {
+ while (1) {
ret = find_first_extent_bit(pinned_extents, last,
&start, &end, EXTENT_DIRTY);
if (ret)
u64 start;
u64 end;
int ret;
- struct btrfs_block_group_cache *cache;
mutex_lock(&root->fs_info->pinned_mutex);
- while(1) {
+ while (1) {
ret = find_first_extent_bit(unpin, 0, &start, &end,
EXTENT_DIRTY);
if (ret)
break;
+
+ ret = btrfs_discard_extent(root, start, end + 1 - start);
+
btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
- cache = btrfs_lookup_block_group(root->fs_info, start);
- if (cache->cached)
- btrfs_add_free_space(cache, start, end - start + 1);
+
if (need_resched()) {
mutex_unlock(&root->fs_info->pinned_mutex);
cond_resched();
}
}
mutex_unlock(&root->fs_info->pinned_mutex);
- return 0;
+ return ret;
}
static int finish_current_insert(struct btrfs_trans_handle *trans,
u64 end;
u64 priv;
u64 search = 0;
- u64 skipped = 0;
struct btrfs_fs_info *info = extent_root->fs_info;
struct btrfs_path *path;
struct pending_extent_op *extent_op, *tmp;
struct list_head insert_list, update_list;
int ret;
- int num_inserts = 0, max_inserts;
+ int num_inserts = 0, max_inserts, restart = 0;
path = btrfs_alloc_path();
INIT_LIST_HEAD(&insert_list);
ret = find_first_extent_bit(&info->extent_ins, search, &start,
&end, EXTENT_WRITEBACK);
if (ret) {
- if (skipped && all && !num_inserts) {
- skipped = 0;
+ if (restart && !num_inserts &&
+ list_empty(&update_list)) {
+ restart = 0;
+ search = 0;
continue;
}
- mutex_unlock(&info->extent_ins_mutex);
break;
}
ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
if (!ret) {
- skipped = 1;
+ if (all)
+ restart = 1;
search = end + 1;
if (need_resched()) {
mutex_unlock(&info->extent_ins_mutex);
list_add_tail(&extent_op->list, &insert_list);
search = end + 1;
if (num_inserts == max_inserts) {
- mutex_unlock(&info->extent_ins_mutex);
+ restart = 1;
break;
}
} else if (extent_op->type == PENDING_BACKREF_UPDATE) {
}
/*
- * process teh update list, clear the writeback bit for it, and if
+ * process the update list, clear the writeback bit for it, and if
* somebody marked this thing for deletion then just unlock it and be
* done, the free_extents will handle it
*/
- mutex_lock(&info->extent_ins_mutex);
list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
clear_extent_bits(&info->extent_ins, extent_op->bytenr,
extent_op->bytenr + extent_op->num_bytes - 1,
if (!list_empty(&update_list)) {
ret = update_backrefs(trans, extent_root, path, &update_list);
BUG_ON(ret);
+
+ /* we may have COW'ed new blocks, so lets start over */
+ if (all)
+ restart = 1;
}
/*
* need to make sure everything is cleaned then reset everything and
* go back to the beginning
*/
- if (!num_inserts && all && skipped) {
+ if (!num_inserts && restart) {
search = 0;
- skipped = 0;
+ restart = 0;
INIT_LIST_HEAD(&update_list);
INIT_LIST_HEAD(&insert_list);
goto again;
extent_op->bytenr + extent_op->num_bytes - 1,
EXTENT_WRITEBACK, GFP_NOFS);
if (extent_op->del) {
+ u64 used;
list_del_init(&extent_op->list);
unlock_extent(&info->extent_ins, extent_op->bytenr,
extent_op->bytenr + extent_op->num_bytes
extent_op->num_bytes, 0);
mutex_unlock(&extent_root->fs_info->pinned_mutex);
+ spin_lock(&info->delalloc_lock);
+ used = btrfs_super_bytes_used(&info->super_copy);
+ btrfs_set_super_bytes_used(&info->super_copy,
+ used - extent_op->num_bytes);
+ used = btrfs_root_used(&extent_root->root_item);
+ btrfs_set_root_used(&extent_root->root_item,
+ used - extent_op->num_bytes);
+ spin_unlock(&info->delalloc_lock);
+
ret = update_block_group(trans, extent_root,
extent_op->bytenr,
extent_op->num_bytes,
BUG_ON(ret);
/*
- * if we broke out of the loop in order to insert stuff because we hit
- * the maximum number of inserts at a time we can handle, then loop
- * back and pick up where we left off
+ * if restart is set for whatever reason we need to go back and start
+ * searching through the pending list again.
+ *
+ * We just inserted some extents, which could have resulted in new
+ * blocks being allocated, which would result in new blocks needing
+ * updates, so if all is set we _must_ restart to get the updated
+ * blocks.
*/
- if (num_inserts == max_inserts) {
- INIT_LIST_HEAD(&insert_list);
- INIT_LIST_HEAD(&update_list);
- num_inserts = 0;
- goto again;
- }
-
- /*
- * again, if we need to make absolutely sure there are no more pending
- * extent operations left and we know that we skipped some, go back to
- * the beginning and do it all again
- */
- if (all && skipped) {
+ if (restart || all) {
INIT_LIST_HEAD(&insert_list);
INIT_LIST_HEAD(&update_list);
search = 0;
- skipped = 0;
+ restart = 0;
num_inserts = 0;
goto again;
}
if (ret == 0) {
struct btrfs_key found_key;
extent_slot = path->slots[0];
- while(extent_slot > 0) {
+ while (extent_slot > 0) {
extent_slot--;
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
extent_slot);
&key, path, -1, 1);
if (ret) {
printk(KERN_ERR "umm, got %d back from search"
- ", was looking for %Lu\n", ret,
- bytenr);
+ ", was looking for %llu\n", ret,
+ (unsigned long long)bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]);
}
BUG_ON(ret);
} else {
btrfs_print_leaf(extent_root, path->nodes[0]);
WARN_ON(1);
- printk("Unable to find ref byte nr %Lu root %Lu "
- "gen %Lu owner %Lu\n", bytenr,
- root_objectid, ref_generation, owner_objectid);
+ printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
+ "root %llu gen %llu owner %llu\n",
+ (unsigned long long)bytenr,
+ (unsigned long long)root_objectid,
+ (unsigned long long)ref_generation,
+ (unsigned long long)owner_objectid);
}
leaf = path->nodes[0];
if (refs == 0) {
u64 super_used;
u64 root_used;
-#ifdef BIO_RW_DISCARD
- u64 map_length = num_bytes;
- struct btrfs_multi_bio *multi = NULL;
-#endif
if (pin) {
mutex_lock(&root->fs_info->pinned_mutex);
mark_free = 1;
BUG_ON(ret < 0);
}
-
/* block accounting for super block */
- spin_lock_irq(&info->delalloc_lock);
+ spin_lock(&info->delalloc_lock);
super_used = btrfs_super_bytes_used(&info->super_copy);
btrfs_set_super_bytes_used(&info->super_copy,
super_used - num_bytes);
- spin_unlock_irq(&info->delalloc_lock);
/* block accounting for root item */
root_used = btrfs_root_used(&root->root_item);
btrfs_set_root_used(&root->root_item,
root_used - num_bytes);
+ spin_unlock(&info->delalloc_lock);
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
BUG_ON(ret);
btrfs_release_path(extent_root, path);
+
+ if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+ ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
+ BUG_ON(ret);
+ }
+
ret = update_block_group(trans, root, bytenr, num_bytes, 0,
mark_free);
BUG_ON(ret);
-
-#ifdef BIO_RW_DISCARD
- /* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
- bytenr, &map_length, &multi, 0);
- if (!ret) {
- struct btrfs_bio_stripe *stripe = multi->stripes;
- int i;
-
- if (map_length > num_bytes)
- map_length = num_bytes;
-
- for (i = 0; i < multi->num_stripes; i++, stripe++) {
- blkdev_issue_discard(stripe->dev->bdev,
- stripe->physical >> 9,
- map_length >> 9);
- }
- kfree(multi);
- }
-#endif
}
btrfs_free_path(path);
finish_current_insert(trans, extent_root, 0);
* find all the blocks marked as pending in the radix tree and remove
* them from the extent map
*/
-static int del_pending_extents(struct btrfs_trans_handle *trans, struct
- btrfs_root *extent_root, int all)
+static int del_pending_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, int all)
{
int ret;
int err = 0;
again:
mutex_lock(&info->extent_ins_mutex);
- while(1) {
+ while (1) {
ret = find_first_extent_bit(pending_del, search, &start, &end,
EXTENT_WRITEBACK);
if (ret) {
if (all && skipped && !nr) {
search = 0;
+ skipped = 0;
continue;
}
mutex_unlock(&info->extent_ins_mutex);
goto again;
}
+ if (!err)
+ finish_current_insert(trans, extent_root, 0);
return err;
}
/* if metadata always pin */
if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
- struct btrfs_block_group_cache *cache;
-
- /* btrfs_free_reserved_extent */
- cache = btrfs_lookup_block_group(root->fs_info, bytenr);
- BUG_ON(!cache);
- btrfs_add_free_space(cache, bytenr, num_bytes);
+ mutex_lock(&root->fs_info->pinned_mutex);
+ btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
+ mutex_unlock(&root->fs_info->pinned_mutex);
update_reserved_extents(root, bytenr, num_bytes, 0);
return 0;
}
* ins->offset == number of blocks
* Any available blocks before search_start are skipped.
*/
-static int noinline find_free_extent(struct btrfs_trans_handle *trans,
+static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *orig_root,
u64 num_bytes, u64 empty_size,
u64 search_start, u64 search_end,
int data)
{
int ret = 0;
- struct btrfs_root * root = orig_root->fs_info->extent_root;
+ struct btrfs_root *root = orig_root->fs_info->extent_root;
u64 total_needed = num_bytes;
u64 *last_ptr = NULL;
u64 last_wanted = 0;
if (data & BTRFS_BLOCK_GROUP_METADATA) {
last_ptr = &root->fs_info->last_alloc;
- empty_cluster = 64 * 1024;
+ if (!btrfs_test_opt(root, SSD))
+ empty_cluster = 64 * 1024;
}
if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
if (!block_group)
goto new_group_no_lock;
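+		/*
+		 * fill the free space cache under its own cache_mutex
+		 * first, so the extent tree scan doesn't have to happen
+		 * with alloc_mutex held
+		 */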
+ if (unlikely(!block_group->cached)) {
+ mutex_lock(&block_group->cache_mutex);
+ ret = cache_block_group(root, block_group);
+ mutex_unlock(&block_group->cache_mutex);
+ if (ret)
+ break;
+ }
+
mutex_lock(&block_group->alloc_mutex);
if (unlikely(!block_group_bits(block_group, data)))
goto new_group;
- ret = cache_block_group(root, block_group);
- if (ret) {
- mutex_unlock(&block_group->alloc_mutex);
- break;
- }
-
- if (block_group->ro)
+ if (unlikely(block_group->ro))
goto new_group;
free_space = btrfs_find_free_space(block_group, search_start,
}
new_group:
mutex_unlock(&block_group->alloc_mutex);
+ put_block_group(block_group);
+ block_group = NULL;
new_group_no_lock:
/* don't try to compare new allocations against the
* last allocation any more
block_group = list_entry(cur, struct btrfs_block_group_cache,
list);
+ atomic_inc(&block_group->count);
+
search_start = block_group->key.objectid;
cur = cur->next;
}
/* we found what we needed */
if (ins->objectid) {
if (!(data & BTRFS_BLOCK_GROUP_DATA))
- trans->block_group = block_group;
+ trans->block_group = block_group->key.objectid;
if (last_ptr)
*last_ptr = ins->objectid + ins->offset;
ret = 0;
} else if (!ret) {
- printk(KERN_ERR "we were searching for %Lu bytes, num_bytes %Lu,"
- " loop %d, allowed_alloc %d\n", total_needed, num_bytes,
+ printk(KERN_ERR "btrfs searching for %llu bytes, "
+ "num_bytes %llu, loop %d, allowed_alloc %d\n",
+ (unsigned long long)total_needed,
+ (unsigned long long)num_bytes,
loop, allowed_chunk_alloc);
ret = -ENOSPC;
}
+ if (block_group)
+ put_block_group(block_group);
up_read(&space_info->groups_sem);
return ret;
static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
{
struct btrfs_block_group_cache *cache;
- struct list_head *l;
- printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
- info->total_bytes - info->bytes_used - info->bytes_pinned -
- info->bytes_reserved, (info->full) ? "" : "not ");
+ printk(KERN_INFO "space_info has %llu free, is %sfull\n",
+ (unsigned long long)(info->total_bytes - info->bytes_used -
+ info->bytes_pinned - info->bytes_reserved),
+ (info->full) ? "" : "not ");
down_read(&info->groups_sem);
- list_for_each(l, &info->block_groups) {
- cache = list_entry(l, struct btrfs_block_group_cache, list);
+ list_for_each_entry(cache, &info->block_groups, list) {
spin_lock(&cache->lock);
- printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
- "%Lu pinned %Lu reserved\n",
- cache->key.objectid, cache->key.offset,
- btrfs_block_group_used(&cache->item),
- cache->pinned, cache->reserved);
+ printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
+ "%llu pinned %llu reserved\n",
+ (unsigned long long)cache->key.objectid,
+ (unsigned long long)cache->key.offset,
+ (unsigned long long)btrfs_block_group_used(&cache->item),
+ (unsigned long long)cache->pinned,
+ (unsigned long long)cache->reserved);
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
}
if (data) {
alloc_profile = info->avail_data_alloc_bits &
- info->data_alloc_profile;
+ info->data_alloc_profile;
data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
} else if (root == root->fs_info->chunk_root) {
alloc_profile = info->avail_system_alloc_bits &
- info->system_alloc_profile;
+ info->system_alloc_profile;
data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
} else {
alloc_profile = info->avail_metadata_alloc_bits &
- info->metadata_alloc_profile;
+ info->metadata_alloc_profile;
data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
}
again:
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, data);
- printk("allocation failed flags %Lu, wanted %Lu\n",
- data, num_bytes);
+ printk(KERN_ERR "btrfs allocation failed flags %llu, "
+ "wanted %llu\n", (unsigned long long)data,
+ (unsigned long long)num_bytes);
dump_space_info(sinfo, num_bytes);
BUG();
}
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
struct btrfs_block_group_cache *cache;
+ int ret = 0;
cache = btrfs_lookup_block_group(root->fs_info, start);
if (!cache) {
- printk(KERN_ERR "Unable to find block group for %Lu\n", start);
+ printk(KERN_ERR "Unable to find block group for %llu\n",
+ (unsigned long long)start);
return -ENOSPC;
}
+
+ ret = btrfs_discard_extent(root, start, len);
+
btrfs_add_free_space(cache, start, len);
+ put_block_group(cache);
update_reserved_extents(root, start, len, 0);
- return 0;
+
+ return ret;
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
parent = ins->objectid;
/* block accounting for super block */
- spin_lock_irq(&info->delalloc_lock);
+ spin_lock(&info->delalloc_lock);
super_used = btrfs_super_bytes_used(&info->super_copy);
btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
- spin_unlock_irq(&info->delalloc_lock);
/* block accounting for root item */
root_used = btrfs_root_used(&root->root_item);
btrfs_set_root_used(&root->root_item, root_used + num_bytes);
+ spin_unlock(&info->delalloc_lock);
if (root == extent_root) {
struct pending_extent_op *extent_op;
}
update_block:
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
+ ret = update_block_group(trans, root, ins->objectid,
+ ins->offset, 1, 0);
if (ret) {
- printk("update block group failed for %Lu %Lu\n",
- ins->objectid, ins->offset);
+ printk(KERN_ERR "btrfs update block group failed for %llu "
+ "%llu\n", (unsigned long long)ins->objectid,
+ (unsigned long long)ins->offset);
BUG();
}
out:
struct btrfs_block_group_cache *block_group;
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
- mutex_lock(&block_group->alloc_mutex);
+ mutex_lock(&block_group->cache_mutex);
cache_block_group(root, block_group);
+ mutex_unlock(&block_group->cache_mutex);
- ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
- ins->offset);
- mutex_unlock(&block_group->alloc_mutex);
+ ret = btrfs_remove_free_space(block_group, ins->objectid,
+ ins->offset);
BUG_ON(ret);
+ put_block_group(block_group);
ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
ref_generation, owner, ins);
return ret;
btrfs_set_header_generation(buf, trans->transid);
btrfs_tree_lock(buf);
clean_tree_block(trans, root, buf);
+
+ btrfs_set_lock_blocking(buf);
btrfs_set_buffer_uptodate(buf);
+
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
set_extent_dirty(&root->dirty_log_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
buf->start + buf->len - 1, GFP_NOFS);
}
trans->blocks_used++;
+ /* this returns a buffer locked for blocking */
return buf;
}
{
u64 leaf_owner;
u64 leaf_generation;
+ struct refsort *sorted;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
int i;
int nritems;
int ret;
+ int refi = 0;
+ int slot;
BUG_ON(!btrfs_is_leaf(leaf));
nritems = btrfs_header_nritems(leaf);
leaf_owner = btrfs_header_owner(leaf);
leaf_generation = btrfs_header_generation(leaf);
+	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+	BUG_ON(!sorted);
+ /* we do this loop twice. The first time we build a list
+ * of the extents we have a reference on, then we sort the list
+ * by bytenr. The second time around we actually do the
+ * extent freeing.
+ */
for (i = 0; i < nritems; i++) {
u64 disk_bytenr;
cond_resched();
btrfs_item_key_to_cpu(leaf, &key, i);
+
+ /* only extents have references, skip everything else */
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
continue;
+
fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+
+ /* inline extents live in the btree, they don't have refs */
if (btrfs_file_extent_type(leaf, fi) ==
BTRFS_FILE_EXTENT_INLINE)
continue;
- /*
- * FIXME make sure to insert a trans record that
- * repeats the snapshot del on crash
- */
+
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+
+ /* holes don't have refs */
if (disk_bytenr == 0)
continue;
+ sorted[refi].bytenr = disk_bytenr;
+ sorted[refi].slot = i;
+ refi++;
+ }
+
+ if (refi == 0)
+ goto out;
+
+ sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+ for (i = 0; i < refi; i++) {
+ u64 disk_bytenr;
+
+ disk_bytenr = sorted[i].bytenr;
+ slot = sorted[i].slot;
+
+ cond_resched();
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
ret = __btrfs_free_extent(trans, root, disk_bytenr,
btrfs_file_extent_disk_num_bytes(leaf, fi),
leaf->start, leaf_owner, leaf_generation,
wake_up(&root->fs_info->transaction_throttle);
cond_resched();
}
+out:
+ kfree(sorted);
return 0;
}
-static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
+static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_leaf_ref *ref)
{
int i;
int ret;
- struct btrfs_extent_info *info = ref->extents;
+ struct btrfs_extent_info *info;
+ struct refsort *sorted;
+
+ if (ref->nritems == 0)
+ return 0;
+	sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
+	BUG_ON(!sorted);
for (i = 0; i < ref->nritems; i++) {
+ sorted[i].bytenr = ref->extents[i].bytenr;
+ sorted[i].slot = i;
+ }
+ sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
+
+ /*
+	 * free the extents in sorted bytenr order so the updates to the
+	 * extent allocation tree happen close to sequentially
+ */
+ for (i = 0; i < ref->nritems; i++) {
+ info = ref->extents + sorted[i].slot;
ret = __btrfs_free_extent(trans, root, info->bytenr,
info->num_bytes, ref->bytenr,
ref->owner, ref->generation,
info++;
}
+ kfree(sorted);
return 0;
}
-int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
- u32 *refs)
+static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
+ u64 len, u32 *refs)
{
int ret;
ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
BUG_ON(ret);
-#if 0 // some debugging code in case we see problems here
+#if 0 /* some debugging code in case we see problems here */
/* if the refs count is one, it won't get increased again. But
* if the ref count is > 1, someone may be decreasing it at
* the same time we are.
free_extent_buffer(eb);
}
if (*refs == 1) {
- printk("block %llu went down to one during drop_snap\n",
- (unsigned long long)start);
+ printk(KERN_ERR "btrfs block %llu went down to one "
+ "during drop_snap\n", (unsigned long long)start);
}
}
return ret;
}
+/*
+ * this is used while deleting old snapshots, and it drops the refs
+ * on a whole subtree starting from a level 1 node.
+ *
+ * The idea is to sort all the leaf pointers, and then drop the
+ * ref on all the leaves in order. Most of the time the leaves
+ * will have ref cache entries, so no leaf IOs will be required to
+ * find the extents they have references on.
+ *
+ * For each leaf, any references it has are also dropped in order
+ *
+ * This ends up dropping the references in something close to optimal
+ * order for reading and modifying the extent allocation tree.
+ */
+static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path)
+{
+ u64 bytenr;
+ u64 root_owner;
+ u64 root_gen;
+ struct extent_buffer *eb = path->nodes[1];
+ struct extent_buffer *leaf;
+ struct btrfs_leaf_ref *ref;
+ struct refsort *sorted = NULL;
+ int nritems = btrfs_header_nritems(eb);
+ int ret;
+ int i;
+ int refi = 0;
+ int slot = path->slots[1];
+ u32 blocksize = btrfs_level_size(root, 0);
+ u32 refs;
+
+ if (nritems == 0)
+ goto out;
+
+ root_owner = btrfs_header_owner(eb);
+ root_gen = btrfs_header_generation(eb);
+	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+	BUG_ON(!sorted);
+
+ /*
+ * step one, sort all the leaf pointers so we don't scribble
+ * randomly into the extent allocation tree
+ */
+ for (i = slot; i < nritems; i++) {
+ sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
+ sorted[refi].slot = i;
+ refi++;
+ }
+
+ /*
+ * nritems won't be zero, but if we're picking up drop_snapshot
+ * after a crash, slot might be > 0, so double check things
+ * just in case.
+ */
+ if (refi == 0)
+ goto out;
+
+ sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+ /*
+ * the first loop frees everything the leaves point to
+ */
+ for (i = 0; i < refi; i++) {
+ u64 ptr_gen;
+
+ bytenr = sorted[i].bytenr;
+
+ /*
+ * check the reference count on this leaf. If it is > 1
+ * we just decrement it below and don't update any
+ * of the refs the leaf points to.
+ */
+ ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
+ BUG_ON(ret);
+ if (refs != 1)
+ continue;
+
+ ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
+
+ /*
+ * the leaf only had one reference, which means the
+ * only thing pointing to this leaf is the snapshot
+ * we're deleting. It isn't possible for the reference
+ * count to increase again later
+ *
+ * The reference cache is checked for the leaf,
+ * and if found we'll be able to drop any refs held by
+ * the leaf without needing to read it in.
+ */
+ ref = btrfs_lookup_leaf_ref(root, bytenr);
+ if (ref && ref->generation != ptr_gen) {
+ btrfs_free_leaf_ref(root, ref);
+ ref = NULL;
+ }
+ if (ref) {
+ ret = cache_drop_leaf_ref(trans, root, ref);
+ BUG_ON(ret);
+ btrfs_remove_leaf_ref(root, ref);
+ btrfs_free_leaf_ref(root, ref);
+ } else {
+ /*
+ * the leaf wasn't in the reference cache, so
+ * we have to read it.
+ */
+ leaf = read_tree_block(root, bytenr, blocksize,
+ ptr_gen);
+ ret = btrfs_drop_leaf_ref(trans, root, leaf);
+ BUG_ON(ret);
+ free_extent_buffer(leaf);
+ }
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+ }
+
+ /*
+ * run through the loop again to free the refs on the leaves.
+ * This is faster than doing it in the loop above because
+ * the leaves are likely to be clustered together. We end up
+ * working in nice chunks on the extent allocation tree.
+ */
+ for (i = 0; i < refi; i++) {
+ bytenr = sorted[i].bytenr;
+ ret = __btrfs_free_extent(trans, root, bytenr,
+ blocksize, eb->start,
+ root_owner, root_gen, 0, 1);
+ BUG_ON(ret);
+
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+ }
+out:
+ kfree(sorted);
+
+ /*
+ * update the path to show we've processed the entire level 1
+ * node. This will get saved into the root's drop_snapshot_progress
+ * field so these drops are not repeated again if this transaction
+ * commits.
+ */
+ path->slots[1] = nritems;
+ return 0;
+}
+
/*
* helper function for drop_snapshot, this walks down the tree dropping ref
* counts as it goes.
*/
-static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int *level)
{
struct extent_buffer *next;
struct extent_buffer *cur;
struct extent_buffer *parent;
- struct btrfs_leaf_ref *ref;
u32 blocksize;
int ret;
u32 refs;
/*
* walk down to the last node level and free all the leaves
*/
- while(*level >= 0) {
+ while (*level >= 0) {
WARN_ON(*level < 0);
WARN_ON(*level >= BTRFS_MAX_LEVEL);
cur = path->nodes[*level];
if (path->slots[*level] >=
btrfs_header_nritems(cur))
break;
+
+ /* the new code goes down to level 1 and does all the
+ * leaves pointed to that node in bulk. So, this check
+ * for level 0 will always be false.
+ *
+ * But, the disk format allows the drop_snapshot_progress
+ * field in the root to leave things in a state where
+ * a leaf will need cleaning up here. If someone crashes
+ * with the old code and then boots with the new code,
+ * we might find a leaf here.
+ */
if (*level == 0) {
ret = btrfs_drop_leaf_ref(trans, root, cur);
BUG_ON(ret);
break;
}
+
+ /*
+ * once we get to level one, process the whole node
+ * at once, including everything below it.
+ */
+ if (*level == 1) {
+ ret = drop_level_one_refs(trans, root, path);
+ BUG_ON(ret);
+ break;
+ }
+
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
blocksize = btrfs_level_size(root, *level - 1);
ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
BUG_ON(ret);
+
+ /*
+ * if there is more than one reference, we don't need
+ * to read that node to drop any references it has. We
+ * just drop the ref we hold on that node and move on to the
+ * next slot in this level.
+ */
if (refs != 1) {
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
continue;
}
+
/*
- * at this point, we have a single ref, and since the
- * only place referencing this extent is a dead root
- * the reference count should never go higher.
- * So, we don't need to check it again
+ * we need to keep freeing things in the next level down.
+ * read the block and loop around to process it
*/
- if (*level == 1) {
- ref = btrfs_lookup_leaf_ref(root, bytenr);
- if (ref && ref->generation != ptr_gen) {
- btrfs_free_leaf_ref(root, ref);
- ref = NULL;
- }
- if (ref) {
- ret = cache_drop_leaf_ref(trans, root, ref);
- BUG_ON(ret);
- btrfs_remove_leaf_ref(root, ref);
- btrfs_free_leaf_ref(root, ref);
- *level = 0;
- break;
- }
- if (printk_ratelimit()) {
- printk("leaf ref miss for bytenr %llu\n",
- (unsigned long long)bytenr);
- }
- }
- next = btrfs_find_tree_block(root, bytenr, blocksize);
- if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
- free_extent_buffer(next);
-
- next = read_tree_block(root, bytenr, blocksize,
- ptr_gen);
- cond_resched();
-#if 0
- /*
- * this is a debugging check and can go away
- * the ref should never go all the way down to 1
- * at this point
- */
- ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
- &refs);
- BUG_ON(ret);
- WARN_ON(refs != 1);
-#endif
- }
+ next = read_tree_block(root, bytenr, blocksize, ptr_gen);
WARN_ON(*level <= 0);
if (path->nodes[*level-1])
free_extent_buffer(path->nodes[*level-1]);
root_owner = btrfs_header_owner(parent);
root_gen = btrfs_header_generation(parent);
+ /*
+ * cleanup and free the reference on the last node
+ * we processed
+ */
ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
parent->start, root_owner, root_gen,
*level, 1);
free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = NULL;
+
*level += 1;
BUG_ON(ret);
* walk_down_tree. The main difference is that it checks reference
* counts while tree blocks are locked.
*/
-static int noinline walk_down_subtree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int *level)
{
next = read_tree_block(root, bytenr, blocksize, ptr_gen);
btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
&refs);
* to find the first node higher up where we haven't yet gone through
* all the slots
*/
-static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
int *level, int max_level)
if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
struct extent_buffer *node;
struct btrfs_disk_key disk_key;
+
+ /*
+ * there is more work to do in this level.
+ * Update the drop_progress marker to reflect
+ * the work we've done so far, and then bump
+ * the slot number
+ */
node = path->nodes[i];
path->slots[i]++;
*level = i;
return 0;
} else {
struct extent_buffer *parent;
+
+ /*
+ * this whole node is done, free our reference
+ * on it and go up one level
+ */
if (path->nodes[*level] == root->node)
parent = path->nodes[*level];
else
}
}
}
- while(1) {
+ while (1) {
wret = walk_down_tree(trans, root, path, &level);
if (wret > 0)
break;
return min(last, start + nr - 1);
}
-static int noinline relocate_inode_pages(struct inode *inode, u64 start,
+static noinline int relocate_inode_pages(struct inode *inode, u64 start,
u64 len)
{
u64 page_start;
}
set_page_extent_mapped(page);
- btrfs_set_extent_delalloc(inode, page_start, page_end);
if (i == first_index)
set_extent_bits(io_tree, page_start, page_end,
EXTENT_BOUNDARY, GFP_NOFS);
+ btrfs_set_extent_delalloc(inode, page_start, page_end);
set_page_dirty(page);
total_dirty++;
return ret;
}
-static int noinline relocate_data_extent(struct inode *reloc_inode,
+static noinline int relocate_data_extent(struct inode *reloc_inode,
struct btrfs_key *extent_key,
u64 offset)
{
root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
root_objectid == BTRFS_DEV_TREE_OBJECTID ||
- root_objectid == BTRFS_TREE_LOG_OBJECTID)
+ root_objectid == BTRFS_TREE_LOG_OBJECTID ||
+ root_objectid == BTRFS_CSUM_TREE_OBJECTID)
return 1;
return 0;
}
-static int noinline __next_ref_path(struct btrfs_trans_handle *trans,
+static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct btrfs_ref_path *ref_path,
int first_time)
if (level < ref_path->lowest_level)
break;
- if (level >= 0) {
+ if (level >= 0)
bytenr = ref_path->nodes[level];
- } else {
+ else
bytenr = ref_path->extent_start;
- }
BUG_ON(bytenr == 0);
parent = ref_path->nodes[level + 1];
level = ref_path->current_level;
while (level < BTRFS_MAX_LEVEL - 1) {
u64 ref_objectid;
- if (level >= 0) {
+
+ if (level >= 0)
bytenr = ref_path->nodes[level];
- } else {
+ else
bytenr = ref_path->extent_start;
- }
+
BUG_ON(bytenr == 0);
key.objectid = bytenr;
return __next_ref_path(trans, extent_root, ref_path, 0);
}
-static int noinline get_new_locations(struct inode *reloc_inode,
+static noinline int get_new_locations(struct inode *reloc_inode,
struct btrfs_key *extent_key,
u64 offset, int no_fragment,
struct disk_extent **extents,
path->slots[0]++;
}
- WARN_ON(cur_pos + offset > last_byte);
+ BUG_ON(cur_pos + offset > last_byte);
if (cur_pos + offset < last_byte) {
ret = -ENOENT;
goto out;
return ret;
}
-static int noinline replace_one_extent(struct btrfs_trans_handle *trans,
+static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *extent_key,
u64 lock_end = 0;
u64 num_bytes;
u64 ext_offset;
- u64 first_pos;
+ u64 search_end = (u64)-1;
u32 nritems;
int nr_scaned = 0;
int extent_locked = 0;
int ret;
memcpy(&key, leaf_key, sizeof(key));
- first_pos = INT_LIMIT(loff_t) - extent_key->offset;
if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
if (key.objectid < ref_path->owner_objectid ||
(key.objectid == ref_path->owner_objectid &&
if ((key.objectid > ref_path->owner_objectid) ||
(key.objectid == ref_path->owner_objectid &&
key.type > BTRFS_EXTENT_DATA_KEY) ||
- (key.offset >= first_pos + extent_key->offset))
+ key.offset >= search_end)
break;
}
num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
ext_offset = btrfs_file_extent_offset(leaf, fi);
- if (first_pos > key.offset - ext_offset)
- first_pos = key.offset - ext_offset;
+ if (search_end == (u64)-1) {
+ search_end = key.offset - ext_offset +
+ btrfs_file_extent_ram_bytes(leaf, fi);
+ }
if (!extent_locked) {
lock_start = key.offset;
}
skip:
if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
- key.offset >= first_pos + extent_key->offset)
+ key.offset >= search_end)
break;
cond_resched();
ref->bytenr = buf->start;
ref->owner = btrfs_header_owner(buf);
ref->generation = btrfs_header_generation(buf);
+
ret = btrfs_add_leaf_ref(root, ref, 0);
WARN_ON(ret);
btrfs_free_leaf_ref(root, ref);
return 0;
}
-static int noinline invalidate_extent_cache(struct btrfs_root *root,
+static noinline int invalidate_extent_cache(struct btrfs_root *root,
struct extent_buffer *leaf,
struct btrfs_block_group_cache *group,
struct btrfs_root *target_root)
return 0;
}
-static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans,
+static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *leaf,
struct btrfs_block_group_cache *group,
return 0;
}
-static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
+static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
* tree blocks are shared between reloc trees, so they are also shared
* between subvols.
*/
-static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
+static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *first_key,
return 0;
}
-static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
+static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *first_key,
return 0;
}
-static int noinline del_extent_zero(struct btrfs_trans_handle *trans,
+static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct btrfs_path *path,
struct btrfs_key *extent_key)
return ret;
}
-static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
+static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
struct btrfs_ref_path *ref_path)
{
struct btrfs_key root_key;
return btrfs_read_fs_root_no_name(fs_info, &root_key);
}
-static int noinline relocate_one_extent(struct btrfs_root *extent_root,
+static noinline int relocate_one_extent(struct btrfs_root *extent_root,
struct btrfs_path *path,
struct btrfs_key *extent_key,
struct btrfs_block_group_cache *group,
ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
if (!ref_path) {
- ret = -ENOMEM;
- goto out;
+ ret = -ENOMEM;
+ goto out;
}
for (loops = 0; ; loops++) {
prev_block = block_start;
}
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
- pass >= 2) {
+ btrfs_record_root_in_trans(found_root);
+ if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * try to update data extent references while
+ * keeping metadata shared between snapshots.
+ */
+ if (pass == 1) {
+ ret = relocate_one_path(trans, found_root,
+ path, &first_key, ref_path,
+ group, reloc_inode);
+ if (ret < 0)
+ goto out;
+ continue;
+ }
/*
* use fallback method to process the remaining
* references.
if (ret)
goto out;
}
- btrfs_record_root_in_trans(found_root);
ret = replace_one_extent(trans, found_root,
path, extent_key,
&first_key, ref_path,
new_extents, nr_extents);
- if (ret < 0)
- goto out;
- continue;
- }
-
- btrfs_record_root_in_trans(found_root);
- if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
+ } else {
ret = relocate_tree_block(trans, found_root, path,
&first_key, ref_path);
- } else {
- /*
- * try to update data extent references while
- * keeping metadata shared between snapshots.
- */
- ret = relocate_one_path(trans, found_root, path,
- &first_key, ref_path,
- group, reloc_inode);
}
if (ret < 0)
goto out;
return flags;
}
-int __alloc_chunk_for_shrink(struct btrfs_root *root,
+static int __alloc_chunk_for_shrink(struct btrfs_root *root,
struct btrfs_block_group_cache *shrink_block_group,
int force)
{
btrfs_set_inode_generation(leaf, item, 1);
btrfs_set_inode_size(leaf, item, size);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
- btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM |
- BTRFS_INODE_NOCOMPRESS);
+ btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
out:
return ret;
}
-static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info,
+static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *group)
{
struct inode *inode = NULL;
} else {
BUG_ON(1);
}
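+	/*
+	 * stash the block group's start in index_cnt so the csum cloning
+	 * code can turn file offsets back into old disk byte numbers
+	 */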
+ BTRFS_I(inode)->index_cnt = group->key.objectid;
err = btrfs_orphan_add(trans, inode);
out:
return inode;
}
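+
+/*
+ * the csums for a relocated data extent still describe the old disk
+ * location.  Look them up by the old bytenr (file offset plus the
+ * block group start saved in index_cnt), rebase each sum onto the new
+ * ordered extent and attach them to it so the csum tree is updated at
+ * the new location
+ */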
+int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
+{
+ struct btrfs_ordered_sum *sums;
+ struct btrfs_sector_sum *sector_sum;
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct list_head list;
+ size_t offset;
+ int ret;
+ u64 disk_bytenr;
+
+ INIT_LIST_HEAD(&list);
+
+ ordered = btrfs_lookup_ordered_extent(inode, file_pos);
+ BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
+
+ disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
+ ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
+ disk_bytenr + len - 1, &list);
+
+ while (!list_empty(&list)) {
+ sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+ list_del_init(&sums->list);
+
+ sector_sum = sums->sums;
+ sums->bytenr = ordered->start;
+
+ offset = 0;
+ while (offset < sums->len) {
+ sector_sum->bytenr += ordered->start - disk_bytenr;
+ sector_sum++;
+ offset += root->sectorsize;
+ }
+
+ btrfs_add_ordered_sum(inode, ordered, sums);
+ }
+ btrfs_put_ordered_extent(ordered);
+ return 0;
+}
+
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
{
struct btrfs_trans_handle *trans;
block_group = btrfs_lookup_block_group(info, group_start);
BUG_ON(!block_group);
- printk("btrfs relocating block group %llu flags %llu\n",
+ printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
(unsigned long long)block_group->key.objectid,
(unsigned long long)block_group->flags);
btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
mutex_unlock(&root->fs_info->cleaner_mutex);
- while(1) {
+ while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (pass == 0) {
btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
- WARN_ON(reloc_inode->i_mapping->nrpages);
}
if (total_found > 0) {
- printk("btrfs found %llu extents in pass %d\n",
+ printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
(unsigned long long)total_found, pass);
pass++;
if (total_found == skipped && pass > 2) {
WARN_ON(block_group->reserved > 0);
WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
spin_unlock(&block_group->lock);
+ put_block_group(block_group);
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
-int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key)
+static int find_first_block_group(struct btrfs_root *root,
+ struct btrfs_path *path, struct btrfs_key *key)
{
int ret = 0;
struct btrfs_key found_key;
if (ret < 0)
goto out;
- while(1) {
+ while (1) {
slot = path->slots[0];
leaf = path->nodes[0];
if (slot >= btrfs_header_nritems(leaf)) {
down_write(&block_group->space_info->groups_sem);
list_del(&block_group->list);
up_write(&block_group->space_info->groups_sem);
+
+ WARN_ON(atomic_read(&block_group->count) != 1);
kfree(block_group);
spin_lock(&info->block_group_cache_lock);
if (!path)
return -ENOMEM;
- while(1) {
+ while (1) {
ret = find_first_block_group(root, path, &key);
if (ret > 0) {
ret = 0;
break;
}
+ atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
mutex_init(&cache->alloc_mutex);
+ mutex_init(&cache->cache_mutex);
INIT_LIST_HEAD(&cache->list);
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
cache->key.objectid = chunk_offset;
cache->key.offset = size;
+ cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
mutex_init(&cache->alloc_mutex);
+ mutex_init(&cache->cache_mutex);
INIT_LIST_HEAD(&cache->list);
- btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
btrfs_set_block_group_used(&cache->item, bytes_used);
btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
path = btrfs_alloc_path();
BUG_ON(!path);
- btrfs_remove_free_space_cache(block_group);
+ spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ btrfs_remove_free_space_cache(block_group);
down_write(&block_group->space_info->groups_sem);
list_del(&block_group->list);
up_write(&block_group->space_info->groups_sem);
spin_unlock(&block_group->space_info->lock);
block_group->space_info->full = 0;
- /*
- memset(shrink_block_group, 0, sizeof(*shrink_block_group));
- kfree(shrink_block_group);
- */
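+	/*
+	 * drop two references: one for the lookup that found this group
+	 * and one for the rb tree entry removed above
+	 */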
+ put_block_group(block_group);
+ put_block_group(block_group);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)