ret = entry;
n = n->rb_left;
} else if (offset > entry->offset) {
- if (contains &&
- (entry->offset + entry->bytes - 1) >= offset) {
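+			/*
+			 * fuzzy match: this entry starts before 'offset' but
+			 * still extends up to it and is large enough to hold
+			 * 'bytes'
+			 */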
+ if ((entry->offset + entry->bytes - 1) >= offset &&
+ bytes <= entry->bytes) {
ret = entry;
break;
}
return ret;
}
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes)
+static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+ u64 offset, u64 bytes)
{
struct btrfs_free_space *right_info;
struct btrfs_free_space *left_info;
 * are adding; if there is, remove that struct and add a new one to
* cover the entire range
*/
- spin_lock(&block_group->lock);
-
right_info = tree_search_offset(&block_group->free_space_offset,
offset+bytes, 0, 1);
left_info = tree_search_offset(&block_group->free_space_offset,
if (ret)
kfree(info);
out:
- spin_unlock(&block_group->lock);
if (ret) {
printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret);
if (ret == -EEXIST)
return ret;
}
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes)
+static int
+__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+ u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
int ret = 0;
- spin_lock(&block_group->lock);
info = tree_search_offset(&block_group->free_space_offset, offset, 0,
1);
ret = link_free_space(block_group, info);
BUG_ON(ret);
+ } else if (info && info->offset < offset &&
+ info->offset + info->bytes >= offset + bytes) {
+ u64 old_start = info->offset;
+ /*
+		 * we're freeing space in the middle of the info;
+ * this can happen during tree log replay
+ *
+ * first unlink the old info and then
+ * insert it again after the hole we're creating
+ */
+ unlink_free_space(block_group, info);
+ if (offset + bytes < info->offset + info->bytes) {
+ u64 old_end = info->offset + info->bytes;
+
+ info->offset = offset + bytes;
+ info->bytes = old_end - info->offset;
+ ret = link_free_space(block_group, info);
+ BUG_ON(ret);
+ } else {
+ /* the hole we're creating ends at the end
+			 * of the info struct; just free the info
+ */
+ kfree(info);
+ }
+
+		/* step two: insert a new info struct to cover anything
+ * before the hole
+ */
+ ret = __btrfs_add_free_space(block_group, old_start,
+ offset - old_start);
+ BUG_ON(ret);
} else {
WARN_ON(1);
}
out:
- spin_unlock(&block_group->lock);
+ return ret;
+}
+
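+/*
+ * add free space back to the tree; takes and drops ->alloc_mutex
+ * itself, so the caller must not already hold it
+ */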
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+ u64 offset, u64 bytes)
+{
+ int ret;
+ struct btrfs_free_space *sp;
+
+ mutex_lock(&block_group->alloc_mutex);
+ ret = __btrfs_add_free_space(block_group, offset, bytes);
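+	/* sanity check: the space we just added must now be findable */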
+ sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+ BUG_ON(!sp);
+ mutex_unlock(&block_group->alloc_mutex);
+
+ return ret;
+}
+
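+/*
+ * same as btrfs_add_free_space(), but for callers that already hold
+ * ->alloc_mutex; the _lock suffix means the lock is held on entry
+ */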
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+ u64 offset, u64 bytes)
+{
+ int ret;
+ struct btrfs_free_space *sp;
+
+ ret = __btrfs_add_free_space(block_group, offset, bytes);
+ sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+ BUG_ON(!sp);
+
+ return ret;
+}
+
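+/*
+ * remove free space from the tree; takes and drops ->alloc_mutex
+ * itself
+ */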
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+ u64 offset, u64 bytes)
+{
+ int ret = 0;
+
+ mutex_lock(&block_group->alloc_mutex);
+ ret = __btrfs_remove_free_space(block_group, offset, bytes);
+ mutex_unlock(&block_group->alloc_mutex);
+
+ return ret;
+}
+
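+/*
+ * as btrfs_remove_free_space(), but the caller must already hold
+ * ->alloc_mutex
+ */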
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+ u64 offset, u64 bytes)
+{
+ int ret;
+
+ ret = __btrfs_remove_free_space(block_group, offset, bytes);
+
return ret;
}
struct btrfs_free_space *info;
struct rb_node *node;
- spin_lock(&block_group->lock);
+ mutex_lock(&block_group->alloc_mutex);
while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
info = rb_entry(node, struct btrfs_free_space, bytes_index);
unlink_free_space(block_group, info);
kfree(info);
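+		/*
+		 * periodically drop the mutex and reschedule so we don't
+		 * hold it across the whole cache teardown
+		 */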
if (need_resched()) {
- spin_unlock(&block_group->lock);
+ mutex_unlock(&block_group->alloc_mutex);
cond_resched();
- spin_lock(&block_group->lock);
+ mutex_lock(&block_group->alloc_mutex);
}
}
- spin_unlock(&block_group->lock);
+ mutex_unlock(&block_group->alloc_mutex);
}
struct btrfs_free_space *btrfs_find_free_space_offset(struct
{
struct btrfs_free_space *ret;
- spin_lock(&block_group->lock);
+ mutex_lock(&block_group->alloc_mutex);
ret = tree_search_offset(&block_group->free_space_offset, offset,
bytes, 0);
- spin_unlock(&block_group->lock);
+ mutex_unlock(&block_group->alloc_mutex);
return ret;
}
{
struct btrfs_free_space *ret;
- spin_lock(&block_group->lock);
+ mutex_lock(&block_group->alloc_mutex);
ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
- spin_unlock(&block_group->lock);
+ mutex_unlock(&block_group->alloc_mutex);
return ret;
}
*block_group, u64 offset,
u64 bytes)
{
- struct btrfs_free_space *ret;
+ struct btrfs_free_space *ret = NULL;
- spin_lock(&block_group->lock);
ret = tree_search_offset(&block_group->free_space_offset, offset,
bytes, 0);
if (!ret)
ret = tree_search_bytes(&block_group->free_space_bytes,
offset, bytes);
- spin_unlock(&block_group->lock);
-
return ret;
}