* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
*/
-
#include <linux/sched.h>
-#include <linux/crc32c.h>
#include <linux/pagemap.h>
#include "hash.h"
+#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
+#include "volumes.h"
-#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
+#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
+#define BLOCK_GROUP_SYSTEM   EXTENT_NEW
+
#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
-static int find_previous_extent(struct btrfs_root *root,
- struct btrfs_path *path)
-{
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
- int ret;
- while(1) {
- if (path->slots[0] == 0) {
- ret = btrfs_prev_leaf(root, path);
- if (ret != 0)
- return ret;
- } else {
- path->slots[0]--;
- }
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
- return 0;
- }
- return 1;
-}
static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
int ret;
struct btrfs_key key;
struct extent_buffer *leaf;
- struct extent_map_tree *free_space_cache;
+ struct extent_io_tree *free_space_cache;
int slot;
u64 last = 0;
u64 hole_size;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
- ret = find_previous_extent(root, path);
+ ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
if (ret < 0)
return ret;
if (ret == 0) {
btrfs_fs_info *info,
u64 bytenr)
{
- struct extent_map_tree *block_group_cache;
+ struct extent_io_tree *block_group_cache;
struct btrfs_block_group_cache *block_group = NULL;
u64 ptr;
u64 start;
block_group_cache = &info->block_group_cache;
ret = find_first_extent_bit(block_group_cache,
bytenr, &start, &end,
- BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA);
+ BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
+ BLOCK_GROUP_SYSTEM);
if (ret) {
return NULL;
}
return block_group;
return NULL;
}
-static u64 noinline find_search_start(struct btrfs_root *root,
+
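+/*
+ * return true only when the cache's allocation type flags include all
+ * of the bits the caller asked for
+ */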
+static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+{
+ return (cache->flags & bits) == bits;
+}
+
+static int noinline find_search_start(struct btrfs_root *root,
struct btrfs_block_group_cache **cache_ret,
- u64 search_start, int num, int data)
+ u64 *start_ret, int num, int data)
{
int ret;
struct btrfs_block_group_cache *cache = *cache_ret;
+ struct extent_io_tree *free_space_cache;
+ struct extent_state *state;
u64 last;
u64 start = 0;
- u64 end = 0;
u64 cache_miss = 0;
u64 total_fs_bytes;
+ u64 search_start = *start_ret;
int wrapped = 0;
- if (!cache) {
+ if (!cache)
goto out;
- }
total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
+ free_space_cache = &root->fs_info->free_space_cache;
+
again:
ret = cache_block_group(root, cache);
if (ret)
goto out;
last = max(search_start, cache->key.objectid);
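+ /* this group doesn't hold the allocation type we need, try the next one */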
+ if (!block_group_bits(cache, data)) {
+ goto new_group;
+ }
+ spin_lock_irq(&free_space_cache->lock);
+ state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
while(1) {
- ret = find_first_extent_bit(&root->fs_info->free_space_cache,
- last, &start, &end, EXTENT_DIRTY);
- if (ret) {
+ if (!state) {
if (!cache_miss)
cache_miss = last;
+ spin_unlock_irq(&free_space_cache->lock);
goto new_group;
}
- start = max(last, start);
- last = end + 1;
+ start = max(last, state->start);
+ last = state->end + 1;
if (last - start < num) {
if (last == cache->key.objectid + cache->key.offset)
cache_miss = start;
+ do {
+ state = extent_state_next(state);
+ } while(state && !(state->state & EXTENT_DIRTY));
continue;
}
- if (data != BTRFS_BLOCK_GROUP_MIXED &&
- start + num > cache->key.objectid + cache->key.offset)
+ spin_unlock_irq(&free_space_cache->lock);
+ if (start + num > cache->key.objectid + cache->key.offset)
goto new_group;
if (start + num > total_fs_bytes)
goto new_group;
- return start;
+ if (!block_group_bits(cache, data)) {
+ printk("block group bits don't match %Lu %d\n", cache->flags, data);
+ }
+ *start_ret = start;
+ return 0;
}
out:
cache = btrfs_lookup_block_group(root->fs_info, search_start);
if (!cache) {
- printk("Unable to find block group for %Lu\n",
- search_start);
+ printk("Unable to find block group for %Lu\n", search_start);
WARN_ON(1);
- return search_start;
}
- return search_start;
+ return -ENOSPC;
new_group:
last = cache->key.objectid + cache->key.offset;
if (!wrapped) {
wrapped = 1;
last = search_start;
- data = BTRFS_BLOCK_GROUP_MIXED;
goto wrapped;
}
goto out;
return num;
}
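+/*
+ * map the on-disk BTRFS_BLOCK_GROUP_* type flags onto the EXTENT_* bits
+ * used to tag ranges in the block_group_cache extent_io tree
+ */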
+static int block_group_state_bits(u64 flags)
+{
+ int bits = 0;
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ bits |= BLOCK_GROUP_DATA;
+ if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ bits |= BLOCK_GROUP_METADATA;
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ bits |= BLOCK_GROUP_SYSTEM;
+ return bits;
+}
+
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache
*hint, u64 search_start,
int data, int owner)
{
struct btrfs_block_group_cache *cache;
- struct extent_map_tree *block_group_cache;
+ struct extent_io_tree *block_group_cache;
struct btrfs_block_group_cache *found_group = NULL;
struct btrfs_fs_info *info = root->fs_info;
u64 used;
int ret;
int full_search = 0;
int factor = 8;
- int data_swap = 0;
block_group_cache = &info->block_group_cache;
total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
if (!owner)
factor = 8;
- if (data == BTRFS_BLOCK_GROUP_MIXED) {
- bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
- factor = 10;
- } else if (data)
- bit = BLOCK_GROUP_DATA;
- else
- bit = BLOCK_GROUP_METADATA;
+ bit = block_group_state_bits(data);
if (search_start && search_start < total_fs_bytes) {
struct btrfs_block_group_cache *shint;
shint = btrfs_lookup_block_group(info, search_start);
- if (shint && (shint->data == data ||
- shint->data == BTRFS_BLOCK_GROUP_MIXED)) {
+ if (shint && block_group_bits(shint, data)) {
used = btrfs_block_group_used(&shint->item);
if (used + shint->pinned <
div_factor(shint->key.offset, factor)) {
}
}
}
- if (hint && hint->key.objectid < total_fs_bytes &&
- (hint->data == data || hint->data == BTRFS_BLOCK_GROUP_MIXED)) {
+ if (hint && block_group_bits(hint, data) &&
+ hint->key.objectid < total_fs_bytes) {
used = btrfs_block_group_used(&hint->item);
if (used + hint->pinned <
div_factor(hint->key.offset, factor)) {
if (cache->key.objectid > total_fs_bytes)
break;
- if (full_search)
- free_check = cache->key.offset;
- else
- free_check = div_factor(cache->key.offset, factor);
- if (used + cache->pinned < free_check) {
- found_group = cache;
- goto found;
+ if (block_group_bits(cache, data)) {
+ if (full_search)
+ free_check = cache->key.offset;
+ else
+ free_check = div_factor(cache->key.offset,
+ factor);
+
+ if (used + cache->pinned < free_check) {
+ found_group = cache;
+ goto found;
+ }
}
cond_resched();
}
full_search = 1;
goto again;
}
- if (!data_swap) {
- data_swap = 1;
- bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
- last = search_start;
- goto again;
- }
found:
return found_group;
}
u32 high_crc = ~(u32)0;
u32 low_crc = ~(u32)0;
__le64 lenum;
-
lenum = cpu_to_le64(root_objectid);
- high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
+ high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(ref_generation);
- low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
-
-#if 0
- lenum = cpu_to_le64(owner);
- low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
- lenum = cpu_to_le64(owner_offset);
- low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
-#endif
+ low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
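+ /*
+ * tree block refs carry owner values below BTRFS_FIRST_FREE_OBJECTID,
+ * so only refs for regular file extents hash the owner and offset
+ */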
+ if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
+ lenum = cpu_to_le64(owner);
+ low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+ lenum = cpu_to_le64(owner_offset);
+ low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+ }
return ((u64)high_crc << 32) | (u64)low_crc;
}
if (!path)
return -ENOMEM;
- path->reada = 0;
+ path->reada = 1;
key.objectid = bytenr;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
key.offset = num_bytes;
btrfs_release_path(root->fs_info->extent_root, path);
- path->reada = 0;
+ path->reada = 1;
ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, root_objectid,
ref_generation, owner, owner_offset);
WARN_ON(num_bytes < root->sectorsize);
path = btrfs_alloc_path();
- path->reada = 0;
+ path->reada = 1;
key.objectid = bytenr;
key.offset = num_bytes;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct extent_map_tree *block_group_cache;
+ struct extent_io_tree *block_group_cache;
struct btrfs_block_group_cache *cache;
int ret;
int err = 0;
ret = get_state_private(block_group_cache, start, &ptr);
if (ret)
break;
-
cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
err = write_one_cache_group(trans, root,
path, cache);
return werr;
}
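+/* walk the per-fs list of space infos and find the one for these flags */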
+static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
+ u64 flags)
+{
+ struct list_head *head = &info->space_info;
+ struct list_head *cur;
+ struct btrfs_space_info *found;
+ list_for_each(cur, head) {
+ found = list_entry(cur, struct btrfs_space_info, list);
+ if (found->flags == flags)
+ return found;
+ }
+ return NULL;
+}
+
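+/*
+ * fold total_bytes and bytes_used into the space_info for these flags,
+ * creating the space_info if it doesn't exist yet
+ */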
+static int update_space_info(struct btrfs_fs_info *info, u64 flags,
+ u64 total_bytes, u64 bytes_used,
+ struct btrfs_space_info **space_info)
+{
+ struct btrfs_space_info *found;
+
+ found = __find_space_info(info, flags);
+ if (found) {
+ found->total_bytes += total_bytes;
+ found->bytes_used += bytes_used;
+ WARN_ON(found->total_bytes < found->bytes_used);
+ *space_info = found;
+ return 0;
+ }
+ found = kmalloc(sizeof(*found), GFP_NOFS);
+ if (!found)
+ return -ENOMEM;
+
+ list_add(&found->list, &info->space_info);
+ found->flags = flags;
+ found->total_bytes = total_bytes;
+ found->bytes_used = bytes_used;
+ found->bytes_pinned = 0;
+ found->full = 0;
+ *space_info = found;
+ return 0;
+}
+
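+/* record which raid/dup profiles can now be allocated for each group type */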
+static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+ u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_DUP);
+ if (extra_flags) {
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ fs_info->avail_data_alloc_bits |= extra_flags;
+ if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ fs_info->avail_metadata_alloc_bits |= extra_flags;
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ fs_info->avail_system_alloc_bits |= extra_flags;
+ }
+}
+
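+/*
+ * pre-allocate a chunk for this allocation type once the space already
+ * committed (used + pinned + this allocation) crosses roughly 60% of the
+ * total, since div_factor(total, 6) is total * 6 / 10
+ */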
+static int do_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, u64 alloc_bytes,
+ u64 flags)
+{
+ struct btrfs_space_info *space_info;
+ u64 thresh;
+ u64 start;
+ u64 num_bytes;
+ int ret;
+
+ space_info = __find_space_info(extent_root->fs_info, flags);
+ if (!space_info) {
+ ret = update_space_info(extent_root->fs_info, flags,
+ 0, 0, &space_info);
+ BUG_ON(ret);
+ }
+ BUG_ON(!space_info);
+
+ if (space_info->full)
+ return 0;
+
+ thresh = div_factor(space_info->total_bytes, 6);
+ if ((space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
+ thresh)
+ return 0;
+
+ ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
+ if (ret == -ENOSPC) {
+printk("space info full %Lu\n", flags);
+ space_info->full = 1;
+ return 0;
+ }
+
+ BUG_ON(ret);
+
+ ret = btrfs_make_block_group(trans, extent_root, 0, flags,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
+ BUG_ON(ret);
+
+ return 0;
+}
+
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc,
- int mark_free, int data)
+ int mark_free)
{
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *info = root->fs_info;
old_val = btrfs_block_group_used(&cache->item);
num_bytes = min(total, cache->key.offset - byte_in_group);
if (alloc) {
- if (cache->data != data &&
- old_val < (cache->key.offset >> 1)) {
- int bit_to_clear;
- int bit_to_set;
- cache->data = data;
- if (data) {
- bit_to_clear = BLOCK_GROUP_METADATA;
- bit_to_set = BLOCK_GROUP_DATA;
- cache->item.flags &=
- ~BTRFS_BLOCK_GROUP_MIXED;
- cache->item.flags |=
- BTRFS_BLOCK_GROUP_DATA;
- } else {
- bit_to_clear = BLOCK_GROUP_DATA;
- bit_to_set = BLOCK_GROUP_METADATA;
- cache->item.flags &=
- ~BTRFS_BLOCK_GROUP_MIXED;
- cache->item.flags &=
- ~BTRFS_BLOCK_GROUP_DATA;
- }
- clear_extent_bits(&info->block_group_cache,
- start, end, bit_to_clear,
- GFP_NOFS);
- set_extent_bits(&info->block_group_cache,
- start, end, bit_to_set,
- GFP_NOFS);
- } else if (cache->data != data &&
- cache->data != BTRFS_BLOCK_GROUP_MIXED) {
- cache->data = BTRFS_BLOCK_GROUP_MIXED;
- set_extent_bits(&info->block_group_cache,
- start, end,
- BLOCK_GROUP_DATA |
- BLOCK_GROUP_METADATA,
- GFP_NOFS);
- }
old_val += num_bytes;
+ cache->space_info->bytes_used += num_bytes;
} else {
old_val -= num_bytes;
+ cache->space_info->bytes_used -= num_bytes;
if (mark_free) {
set_extent_dirty(&info->free_space_cache,
bytenr, bytenr + num_bytes - 1,
}
return 0;
}
+
static int update_pinned_extents(struct btrfs_root *root,
u64 bytenr, u64 num, int pin)
{
(bytenr - cache->key.objectid));
if (pin) {
cache->pinned += len;
+ cache->space_info->bytes_pinned += len;
fs_info->total_pinned += len;
} else {
cache->pinned -= len;
+ cache->space_info->bytes_pinned -= len;
fs_info->total_pinned -= len;
}
bytenr += len;
return 0;
}
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
+int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
u64 last = 0;
u64 start;
u64 end;
- struct extent_map_tree *pinned_extents = &root->fs_info->pinned_extents;
+ struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
int ret;
while(1) {
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct extent_map_tree *unpin)
+ struct extent_io_tree *unpin)
{
u64 start;
u64 end;
int ret;
- struct extent_map_tree *free_space_cache;
+ struct extent_io_tree *free_space_cache;
free_space_cache = &root->fs_info->free_space_cache;
while(1) {
root->fs_info->running_transaction->transid;
u64 header_transid =
btrfs_header_generation(buf);
- if (header_transid == transid) {
+ if (header_transid == transid &&
+ !btrfs_header_flag(buf,
+ BTRFS_HEADER_FLAG_WRITTEN)) {
clean_tree_block(NULL, root, buf);
free_extent_buffer(buf);
return 1;
struct btrfs_root *extent_root = info->extent_root;
struct extent_buffer *leaf;
int ret;
+ int extent_slot = 0;
+ int found_extent = 0;
+ int num_to_del = 1;
struct btrfs_extent_item *ei;
u32 refs;
if (!path)
return -ENOMEM;
- path->reada = 0;
+ path->reada = 1;
ret = lookup_extent_backref(trans, extent_root, path,
bytenr, root_objectid,
ref_generation,
owner_objectid, owner_offset, 1);
if (ret == 0) {
- ret = btrfs_del_item(trans, extent_root, path);
+ struct btrfs_key found_key;
+ extent_slot = path->slots[0];
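+ /*
+ * the extent item sorts just before its back refs, so scan back a
+ * few slots to see if both can be deleted in one pass
+ */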
+ while(extent_slot > 0) {
+ extent_slot--;
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ extent_slot);
+ if (found_key.objectid != bytenr)
+ break;
+ if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
+ found_key.offset == num_bytes) {
+ found_extent = 1;
+ break;
+ }
+ if (path->slots[0] - extent_slot > 5)
+ break;
+ }
+ if (!found_extent)
+ ret = btrfs_del_item(trans, extent_root, path);
} else {
btrfs_print_leaf(extent_root, path->nodes[0]);
WARN_ON(1);
root_objectid, ref_generation, owner_objectid,
owner_offset);
}
- btrfs_release_path(extent_root, path);
- ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
- if (ret < 0)
- return ret;
- BUG_ON(ret);
+ if (!found_extent) {
+ btrfs_release_path(extent_root, path);
+ ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
+ if (ret < 0)
+ return ret;
+ BUG_ON(ret);
+ extent_slot = path->slots[0];
+ }
leaf = path->nodes[0];
- ei = btrfs_item_ptr(leaf, path->slots[0],
+ ei = btrfs_item_ptr(leaf, extent_slot,
struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
BUG_ON(refs == 0);
refs -= 1;
btrfs_set_extent_refs(leaf, ei, refs);
+
btrfs_mark_buffer_dirty(leaf);
+ if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
+ /* if the back ref and the extent are next to each other
+ * they get deleted below in one shot
+ */
+ path->slots[0] = extent_slot;
+ num_to_del = 2;
+ } else if (found_extent) {
+ /* otherwise delete the extent back ref */
+ ret = btrfs_del_item(trans, extent_root, path);
+ BUG_ON(ret);
+ /* if refs are 0, we need to setup the path for deletion */
+ if (refs == 0) {
+ btrfs_release_path(extent_root, path);
+ ret = btrfs_search_slot(trans, extent_root, &key, path,
+ -1, 1);
+ if (ret < 0)
+ return ret;
+ BUG_ON(ret);
+ }
+ }
+
if (refs == 0) {
u64 super_used;
u64 root_used;
root_used = btrfs_root_used(&root->root_item);
btrfs_set_root_used(&root->root_item,
root_used - num_bytes);
-
- ret = btrfs_del_item(trans, extent_root, path);
+ ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
+ num_to_del);
if (ret) {
return ret;
}
ret = update_block_group(trans, root, bytenr, num_bytes, 0,
- mark_free, 0);
+ mark_free);
BUG_ON(ret);
}
btrfs_free_path(path);
int err = 0;
u64 start;
u64 end;
- struct extent_map_tree *pending_del;
- struct extent_map_tree *pinned_extents;
+ struct extent_io_tree *pending_del;
+ struct extent_io_tree *pinned_extents;
pending_del = &extent_root->fs_info->pending_del;
pinned_extents = &extent_root->fs_info->pinned_extents;
u64 exclude_start, u64 exclude_nr,
int data)
{
- struct btrfs_path *path;
- struct btrfs_key key;
- u64 hole_size = 0;
- u64 aligned;
int ret;
- int slot = 0;
- u64 last_byte = 0;
u64 orig_search_start = search_start;
- int start_found;
- struct extent_buffer *l;
struct btrfs_root * root = orig_root->fs_info->extent_root;
struct btrfs_fs_info *info = root->fs_info;
u64 total_needed = num_bytes;
- int level;
+ u64 *last_ptr = NULL;
struct btrfs_block_group_cache *block_group;
int full_scan = 0;
int wrapped = 0;
- u64 cached_start;
+ int empty_cluster = 2 * 1024 * 1024;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
- level = btrfs_header_level(root->node);
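+ /* cluster metadata allocations around the end of the previous one */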
+ if (data & BTRFS_BLOCK_GROUP_METADATA) {
+ last_ptr = &root->fs_info->last_alloc;
+ empty_cluster = 256 * 1024;
+ }
- if (num_bytes >= 32 * 1024 * 1024 && hint_byte) {
- data = BTRFS_BLOCK_GROUP_MIXED;
+ if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
+ last_ptr = &root->fs_info->last_data_alloc;
}
- /* for SSD, cluster allocations together as much as possible */
- if (btrfs_test_opt(root, SSD)) {
- if (!data) {
- if (root->fs_info->last_alloc)
- hint_byte = root->fs_info->last_alloc;
- else {
- hint_byte = hint_byte &
- ~((u64)BTRFS_BLOCK_GROUP_SIZE - 1);
- empty_size += 16 * 1024 * 1024;
- }
+ if (last_ptr) {
+ if (*last_ptr)
+ hint_byte = *last_ptr;
+ else {
+ empty_size += empty_cluster;
}
}
- search_end = min(search_end,
- btrfs_super_total_bytes(&info->super_copy));
+ if (search_end == (u64)-1)
+ search_end = btrfs_super_total_bytes(&info->super_copy);
+
if (hint_byte) {
block_group = btrfs_lookup_block_group(info, hint_byte);
if (!block_group)
hint_byte = search_start;
block_group = btrfs_find_block_group(root, block_group,
hint_byte, data, 1);
+ if (last_ptr && *last_ptr == 0 && block_group)
+ hint_byte = block_group->key.objectid;
} else {
block_group = btrfs_find_block_group(root,
trans->block_group,
search_start, data, 1);
}
+ search_start = max(search_start, hint_byte);
total_needed += empty_size;
- path = btrfs_alloc_path();
+
check_failed:
if (!block_group) {
block_group = btrfs_lookup_block_group(info, search_start);
block_group = btrfs_lookup_block_group(info,
orig_search_start);
}
- search_start = find_search_start(root, &block_group, search_start,
- total_needed, data);
+ ret = find_search_start(root, &block_group, &search_start,
+ total_needed, data);
+ if (ret == -ENOSPC && last_ptr && *last_ptr) {
+ *last_ptr = 0;
+ block_group = btrfs_lookup_block_group(info,
+ orig_search_start);
+ search_start = orig_search_start;
+ ret = find_search_start(root, &block_group, &search_start,
+ total_needed, data);
+ }
+ if (ret == -ENOSPC)
+ goto enospc;
+ if (ret)
+ goto error;
- if (!data && btrfs_test_opt(root, SSD) && info->last_alloc &&
- search_start != info->last_alloc) {
- info->last_alloc = 0;
+ if (last_ptr && *last_ptr && search_start != *last_ptr) {
+ *last_ptr = 0;
if (!empty_size) {
- empty_size += 16 * 1024 * 1024;
+ empty_size += empty_cluster;
total_needed += empty_size;
}
- search_start = find_search_start(root, &block_group,
- search_start, total_needed,
- data);
+ block_group = btrfs_lookup_block_group(info,
+ orig_search_start);
+ search_start = orig_search_start;
+ ret = find_search_start(root, &block_group,
+ &search_start, total_needed, data);
+ if (ret == -ENOSPC)
+ goto enospc;
+ if (ret)
+ goto error;
}
search_start = stripe_align(root, search_start);
- cached_start = search_start;
- btrfs_init_path(path);
ins->objectid = search_start;
- ins->offset = 0;
- start_found = 0;
- path->reada = 2;
-
- ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
- if (ret < 0)
- goto error;
- ret = find_previous_extent(root, path);
- if (ret < 0)
- goto error;
- l = path->nodes[0];
- btrfs_item_key_to_cpu(l, &key, path->slots[0]);
- while (1) {
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto error;
-
- search_start = max(search_start,
- block_group->key.objectid);
- if (!start_found) {
- aligned = stripe_align(root, search_start);
- ins->objectid = aligned;
- if (aligned >= search_end) {
- ret = -ENOSPC;
- goto error;
- }
- ins->offset = search_end - aligned;
- start_found = 1;
- goto check_pending;
- }
- ins->objectid = stripe_align(root,
- last_byte > search_start ?
- last_byte : search_start);
- if (search_end <= ins->objectid) {
- ret = -ENOSPC;
- goto error;
- }
- ins->offset = search_end - ins->objectid;
- BUG_ON(ins->objectid >= search_end);
- goto check_pending;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
-
- if (key.objectid >= search_start && key.objectid > last_byte &&
- start_found) {
- if (last_byte < search_start)
- last_byte = search_start;
- aligned = stripe_align(root, last_byte);
- hole_size = key.objectid - aligned;
- if (key.objectid > aligned && hole_size >= num_bytes) {
- ins->objectid = aligned;
- ins->offset = hole_size;
- goto check_pending;
- }
- }
- if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
- if (!start_found && btrfs_key_type(&key) ==
- BTRFS_BLOCK_GROUP_ITEM_KEY) {
- last_byte = key.objectid;
- start_found = 1;
- }
- goto next;
- }
-
-
- start_found = 1;
- last_byte = key.objectid + key.offset;
-
- if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
- last_byte >= block_group->key.objectid +
- block_group->key.offset) {
- btrfs_release_path(root, path);
- search_start = block_group->key.objectid +
- block_group->key.offset;
- goto new_group;
- }
-next:
- path->slots[0]++;
- cond_resched();
- }
-check_pending:
- /* we have to make sure we didn't find an extent that has already
- * been allocated by the map tree or the original allocation
- */
- btrfs_release_path(root, path);
- BUG_ON(ins->objectid < search_start);
+ ins->offset = num_bytes;
if (ins->objectid + num_bytes >= search_end)
goto enospc;
- if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
- ins->objectid + num_bytes > block_group->
- key.objectid + block_group->key.offset) {
+
+ if (ins->objectid + num_bytes >
+ block_group->key.objectid + block_group->key.offset) {
search_start = block_group->key.objectid +
block_group->key.offset;
goto new_group;
}
+
if (test_range_bit(&info->extent_ins, ins->objectid,
ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
search_start = ins->objectid + num_bytes;
goto new_group;
}
+
if (test_range_bit(&info->pinned_extents, ins->objectid,
ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
search_start = ins->objectid + num_bytes;
goto new_group;
}
+
if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
ins->objectid < exclude_start + exclude_nr)) {
search_start = exclude_start + exclude_nr;
goto new_group;
}
- if (!data) {
+
+ if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
block_group = btrfs_lookup_block_group(info, ins->objectid);
if (block_group)
trans->block_group = block_group;
}
ins->offset = num_bytes;
- btrfs_free_path(path);
+ if (last_ptr) {
+ *last_ptr = ins->objectid + ins->offset;
+ if (*last_ptr ==
+ btrfs_super_total_bytes(&root->fs_info->super_copy)) {
+ *last_ptr = 0;
+ }
+ }
return 0;
new_group:
if (!full_scan)
total_needed -= empty_size;
full_scan = 1;
- data = BTRFS_BLOCK_GROUP_MIXED;
} else
wrapped = 1;
}
goto check_failed;
error:
- btrfs_release_path(root, path);
- btrfs_free_path(path);
- if (btrfs_test_opt(root, SSD) && !ret && !data)
- info->last_alloc = ins->objectid + ins->offset;
return ret;
}
/*
*/
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- u64 num_bytes, u64 root_objectid, u64 ref_generation,
+ u64 num_bytes, u64 min_alloc_size,
+ u64 root_objectid, u64 ref_generation,
u64 owner, u64 owner_offset,
u64 empty_size, u64 hint_byte,
u64 search_end, struct btrfs_key *ins, int data)
u64 root_used;
u64 search_start = 0;
u64 new_hint;
+ u64 alloc_profile;
+ u32 sizes[2];
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_root *extent_root = info->extent_root;
- struct btrfs_extent_item extent_item;
+ struct btrfs_extent_item *extent_item;
+ struct btrfs_extent_ref *ref;
struct btrfs_path *path;
-
- btrfs_set_stack_extent_refs(&extent_item, 1);
+ struct btrfs_key keys[2];
+
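+ /* pick the allocation type and raid profile from what this fs has available */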
+ if (data) {
+ alloc_profile = info->avail_data_alloc_bits &
+ info->data_alloc_profile;
+ data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
+ } else if (root == root->fs_info->chunk_root) {
+ alloc_profile = info->avail_system_alloc_bits &
+ info->system_alloc_profile;
+ data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
+ } else {
+ alloc_profile = info->avail_metadata_alloc_bits &
+ info->metadata_alloc_profile;
+ data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
+ }
+again:
+ if (root->ref_cows) {
+ if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
+ ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+ 2 * 1024 * 1024,
+ BTRFS_BLOCK_GROUP_METADATA |
+ (info->metadata_alloc_profile &
+ info->avail_metadata_alloc_bits));
+ BUG_ON(ret);
+ }
+ ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+ num_bytes + 2 * 1024 * 1024, data);
+ BUG_ON(ret);
+ }
new_hint = max(hint_byte, root->fs_info->alloc_start);
if (new_hint < btrfs_super_total_bytes(&info->super_copy))
search_start, search_end, hint_byte, ins,
trans->alloc_exclude_start,
trans->alloc_exclude_nr, data);
+
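+ /* if a large allocation fails, back off by halves down to min_alloc_size */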
+ if (ret == -ENOSPC && num_bytes > min_alloc_size) {
+ num_bytes = num_bytes >> 1;
+ num_bytes = max(num_bytes, min_alloc_size);
+ goto again;
+ }
BUG_ON(ret);
if (ret)
return ret;
set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
ins->objectid + ins->offset - 1,
EXTENT_LOCKED, GFP_NOFS);
- WARN_ON(data == 1);
goto update_block;
}
WARN_ON(trans->alloc_exclude_nr);
trans->alloc_exclude_start = ins->objectid;
trans->alloc_exclude_nr = ins->offset;
- ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
- sizeof(extent_item));
- trans->alloc_exclude_start = 0;
- trans->alloc_exclude_nr = 0;
- BUG_ON(ret);
+ memcpy(&keys[0], ins, sizeof(*ins));
+ keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
+ owner, owner_offset);
+ keys[1].objectid = ins->objectid;
+ keys[1].type = BTRFS_EXTENT_REF_KEY;
+ sizes[0] = sizeof(*extent_item);
+ sizes[1] = sizeof(*ref);
path = btrfs_alloc_path();
BUG_ON(!path);
- ret = btrfs_insert_extent_backref(trans, extent_root, path,
- ins->objectid, root_objectid,
- ref_generation, owner, owner_offset);
+
+ ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
+ sizes, 2);
BUG_ON(ret);
+ extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_extent_item);
+ btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
+ ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
+ struct btrfs_extent_ref);
+
+ btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
+ btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
+ btrfs_set_ref_objectid(path->nodes[0], ref, owner);
+ btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
+
+ btrfs_mark_buffer_dirty(path->nodes[0]);
+
+ trans->alloc_exclude_start = 0;
+ trans->alloc_exclude_nr = 0;
btrfs_free_path(path);
finish_current_insert(trans, extent_root);
pending_ret = del_pending_extents(trans, extent_root);
}
update_block:
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
- data);
- BUG_ON(ret);
+ ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
+ if (ret) {
+ printk("update block group failed for %Lu %Lu\n",
+ ins->objectid, ins->offset);
+ BUG();
+ }
return 0;
}
int ret;
struct extent_buffer *buf;
- ret = btrfs_alloc_extent(trans, root, blocksize,
+ ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
root_objectid, ref_generation,
level, first_objectid, empty_size, hint,
(u64)-1, &ins, 0);
}
btrfs_set_header_generation(buf, trans->transid);
clean_tree_block(trans, root, buf);
- wait_on_tree_block_writeback(root, buf);
btrfs_set_buffer_uptodate(buf);
if (PageDirty(buf->first_page)) {
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
- set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
- buf->start, buf->start + buf->len - 1,
- EXTENT_CSUM, GFP_NOFS);
- buf->flags |= EXTENT_CSUM;
if (!btrfs_test_opt(root, SSD))
btrfs_set_buffer_defrag(buf);
trans->blocks_used++;
}
static void noinline reada_walk_down(struct btrfs_root *root,
- struct extent_buffer *node)
+ struct extent_buffer *node,
+ int slot)
{
- int i;
- u32 nritems;
u64 bytenr;
- int ret;
+ u64 last = 0;
+ u32 nritems;
u32 refs;
- int level;
u32 blocksize;
+ int ret;
+ int i;
+ int level;
+ int skipped = 0;
nritems = btrfs_header_nritems(node);
level = btrfs_header_level(node);
- for (i = 0; i < nritems; i++) {
+ if (level)
+ return;
+
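+ /*
+ * readahead nearby child blocks, skipping pointers far from the last
+ * block read and giving up after 32 skips
+ */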
+ for (i = slot; i < nritems && skipped < 32; i++) {
bytenr = btrfs_node_blockptr(node, i);
- blocksize = btrfs_level_size(root, level - 1);
- ret = lookup_extent_ref(NULL, root, bytenr, blocksize, &refs);
- BUG_ON(ret);
- if (refs != 1)
+ if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
+ (last > bytenr && last - bytenr > 32 * 1024))) {
+ skipped++;
continue;
+ }
+ blocksize = btrfs_level_size(root, level - 1);
+ if (i != slot) {
+ ret = lookup_extent_ref(NULL, root, bytenr,
+ blocksize, &refs);
+ BUG_ON(ret);
+ if (refs != 1) {
+ skipped++;
+ continue;
+ }
+ }
mutex_unlock(&root->fs_info->fs_mutex);
ret = readahead_tree_block(root, bytenr, blocksize);
+ last = bytenr + blocksize;
cond_resched();
mutex_lock(&root->fs_info->fs_mutex);
if (ret)
WARN_ON(*level >= BTRFS_MAX_LEVEL);
cur = path->nodes[*level];
- if (*level > 0 && path->slots[*level] == 0)
- reada_walk_down(root, cur);
-
if (btrfs_header_level(cur) != *level)
WARN_ON(1);
next = btrfs_find_tree_block(root, bytenr, blocksize);
if (!next || !btrfs_buffer_uptodate(next)) {
free_extent_buffer(next);
+ reada_walk_down(root, cur, path->slots[*level]);
+
mutex_unlock(&root->fs_info->fs_mutex);
next = read_tree_block(root, bytenr, blocksize);
mutex_lock(&root->fs_info->fs_mutex);
- /* we dropped the lock, check one more time */
+ /* we've dropped the lock, double check */
ret = lookup_extent_ref(trans, root, bytenr,
blocksize, &refs);
BUG_ON(ret);
BUG_ON(ret);
continue;
}
+ } else if (next) {
+ btrfs_verify_block_csum(root, next);
}
WARN_ON(*level <= 0);
if (path->nodes[*level-1])
unsigned long last_index;
unsigned long i;
struct page *page;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct file_ra_state *ra;
ra = kzalloc(sizeof(*ra), GFP_NOFS);
page_start = (u64)page->index << PAGE_CACHE_SHIFT;
page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(em_tree, page_start, page_end, GFP_NOFS);
+ lock_extent(io_tree, page_start, page_end, GFP_NOFS);
delalloc_start = page_start;
- existing_delalloc =
- count_range_bits(&BTRFS_I(inode)->extent_tree,
- &delalloc_start, page_end,
- PAGE_CACHE_SIZE, EXTENT_DELALLOC);
+ existing_delalloc = count_range_bits(io_tree,
+ &delalloc_start, page_end,
+ PAGE_CACHE_SIZE, EXTENT_DELALLOC);
- set_extent_delalloc(em_tree, page_start,
+ set_extent_delalloc(io_tree, page_start,
page_end, GFP_NOFS);
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
- existing_delalloc;
- spin_unlock(&root->fs_info->delalloc_lock);
-
- unlock_extent(em_tree, page_start, page_end, GFP_NOFS);
+ unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
u64 cur_byte;
u64 total_found;
struct btrfs_fs_info *info = root->fs_info;
- struct extent_map_tree *block_group_cache;
+ struct extent_io_tree *block_group_cache;
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf;
if (ret < 0)
goto out;
- ret = find_previous_extent(root, path);
+ ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
if (ret < 0)
goto out;
if (ret == 0) {
int btrfs_grow_extent_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 new_size)
{
- struct btrfs_path *path;
- u64 nr = 0;
- u64 cur_byte;
- u64 old_size;
- unsigned long rem;
- struct btrfs_block_group_cache *cache;
- struct btrfs_block_group_item *item;
- struct btrfs_fs_info *info = root->fs_info;
- struct extent_map_tree *block_group_cache;
- struct btrfs_key key;
- struct extent_buffer *leaf;
- int ret;
- int bit;
-
- old_size = btrfs_super_total_bytes(&info->super_copy);
- block_group_cache = &info->block_group_cache;
-
- root = info->extent_root;
-
- cache = btrfs_lookup_block_group(root->fs_info, old_size - 1);
-
- cur_byte = cache->key.objectid + cache->key.offset;
- if (cur_byte >= new_size)
- goto set_size;
-
- key.offset = BTRFS_BLOCK_GROUP_SIZE;
- btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
+ btrfs_set_super_total_bytes(&root->fs_info->super_copy, new_size);
+ return 0;
+}
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
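+/*
+ * advance the path to the first block group item at or after *key,
+ * returning 0 when one is found
+ */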
+int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ int ret;
+ struct btrfs_key found_key;
+ struct extent_buffer *leaf;
+ int slot;
- while(cur_byte < new_size) {
- key.objectid = cur_byte;
- ret = btrfs_insert_empty_item(trans, root, path, &key,
- sizeof(struct btrfs_block_group_item));
- BUG_ON(ret);
+ ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ while(1) {
+ slot = path->slots[0];
leaf = path->nodes[0];
- item = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_block_group_item);
-
- btrfs_set_disk_block_group_used(leaf, item, 0);
- div_long_long_rem(nr, 3, &rem);
- if (rem) {
- btrfs_set_disk_block_group_flags(leaf, item,
- BTRFS_BLOCK_GROUP_DATA);
- } else {
- btrfs_set_disk_block_group_flags(leaf, item, 0);
- }
- nr++;
-
- cache = kmalloc(sizeof(*cache), GFP_NOFS);
- BUG_ON(!cache);
-
- read_extent_buffer(leaf, &cache->item, (unsigned long)item,
- sizeof(cache->item));
-
- memcpy(&cache->key, &key, sizeof(key));
- cache->cached = 0;
- cache->pinned = 0;
- cur_byte = key.objectid + key.offset;
- btrfs_release_path(root, path);
-
- if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
- bit = BLOCK_GROUP_DATA;
- cache->data = BTRFS_BLOCK_GROUP_DATA;
- } else {
- bit = BLOCK_GROUP_METADATA;
- cache->data = 0;
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto error;
+ break;
}
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
- /* use EXTENT_LOCKED to prevent merging */
- set_extent_bits(block_group_cache, key.objectid,
- key.objectid + key.offset - 1,
- bit | EXTENT_LOCKED, GFP_NOFS);
- set_state_private(block_group_cache, key.objectid,
- (unsigned long)cache);
+ if (found_key.objectid >= key->objectid &&
+ found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
+ return 0;
+ path->slots[0]++;
}
- btrfs_free_path(path);
-set_size:
- btrfs_set_super_total_bytes(&info->super_copy, new_size);
- return 0;
+ ret = -ENOENT;
+error:
+ return ret;
}
int btrfs_read_block_groups(struct btrfs_root *root)
{
struct btrfs_path *path;
int ret;
- int err = 0;
int bit;
struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *info = root->fs_info;
- struct extent_map_tree *block_group_cache;
+ struct btrfs_space_info *space_info;
+ struct extent_io_tree *block_group_cache;
struct btrfs_key key;
struct btrfs_key found_key;
struct extent_buffer *leaf;
block_group_cache = &info->block_group_cache;
-
root = info->extent_root;
key.objectid = 0;
- key.offset = BTRFS_BLOCK_GROUP_SIZE;
+ key.offset = 0;
btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
while(1) {
- ret = btrfs_search_slot(NULL, info->extent_root,
- &key, path, 0, 0);
- if (ret != 0) {
- err = ret;
- break;
+ ret = find_first_block_group(root, path, &key);
+ if (ret > 0) {
+ ret = 0;
+ goto error;
}
+ if (ret != 0)
+ goto error;
+
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
cache = kmalloc(sizeof(*cache), GFP_NOFS);
if (!cache) {
- err = -1;
+ ret = -ENOMEM;
break;
}
memcpy(&cache->key, &found_key, sizeof(found_key));
cache->cached = 0;
cache->pinned = 0;
+
key.objectid = found_key.objectid + found_key.offset;
btrfs_release_path(root, path);
-
- if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) {
- bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
- cache->data = BTRFS_BLOCK_GROUP_MIXED;
- } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
+ cache->flags = btrfs_block_group_flags(&cache->item);
+ bit = 0;
+ if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
bit = BLOCK_GROUP_DATA;
- cache->data = BTRFS_BLOCK_GROUP_DATA;
- } else {
+ } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+ bit = BLOCK_GROUP_SYSTEM;
+ } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
bit = BLOCK_GROUP_METADATA;
- cache->data = 0;
}
+ set_avail_alloc_bits(info, cache->flags);
+
+ ret = update_space_info(info, cache->flags, found_key.offset,
+ btrfs_block_group_used(&cache->item),
+ &space_info);
+ BUG_ON(ret);
+ cache->space_info = space_info;
/* use EXTENT_LOCKED to prevent merging */
set_extent_bits(block_group_cache, found_key.objectid,
btrfs_super_total_bytes(&info->super_copy))
break;
}
-
+ ret = 0;
+error:
btrfs_free_path(path);
+ return ret;
+}
+
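+/*
+ * set up the in-memory cache and space accounting for a new chunk and
+ * insert its block group item into the extent tree
+ */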
+int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 bytes_used,
+ u64 type, u64 chunk_objectid, u64 chunk_offset,
+ u64 size)
+{
+ int ret;
+ int bit = 0;
+ struct btrfs_root *extent_root;
+ struct btrfs_block_group_cache *cache;
+ struct extent_io_tree *block_group_cache;
+
+ extent_root = root->fs_info->extent_root;
+ block_group_cache = &root->fs_info->block_group_cache;
+
+ cache = kmalloc(sizeof(*cache), GFP_NOFS);
+ BUG_ON(!cache);
+ cache->key.objectid = chunk_offset;
+ cache->key.offset = size;
+ cache->cached = 0;
+ cache->pinned = 0;
+
+ btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
+ memset(&cache->item, 0, sizeof(cache->item));
+ btrfs_set_block_group_used(&cache->item, bytes_used);
+ btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
+ cache->flags = type;
+ btrfs_set_block_group_flags(&cache->item, type);
+
+ ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
+ &cache->space_info);
+ BUG_ON(ret);
+
+ bit = block_group_state_bits(type);
+ set_extent_bits(block_group_cache, chunk_offset,
+ chunk_offset + size - 1,
+ bit | EXTENT_LOCKED, GFP_NOFS);
+
+ set_state_private(block_group_cache, chunk_offset,
+ (unsigned long)cache);
+ ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
+ sizeof(cache->item));
+ BUG_ON(ret);
+
+ finish_current_insert(trans, extent_root);
+ ret = del_pending_extents(trans, extent_root);
+ BUG_ON(ret);
+ set_avail_alloc_bits(extent_root->fs_info, type);
return 0;
}