io_ctl_unmap_page(io_ctl);
for (i = 0; i < io_ctl->num_pages; i++) {
- ClearPageChecked(io_ctl->pages[i]);
- unlock_page(io_ctl->pages[i]);
- page_cache_release(io_ctl->pages[i]);
+ if (io_ctl->pages[i]) {
+ ClearPageChecked(io_ctl->pages[i]);
+ unlock_page(io_ctl->pages[i]);
+ page_cache_release(io_ctl->pages[i]);
+ }
}
}
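
The new NULL check makes this teardown safe to run after a setup that failed partway, when only a prefix of the pages array actually holds references. A minimal userspace sketch of the same cleanup pattern, with malloc'd buffers standing in for page references (names invented):

#include <stdlib.h>

#define NPAGES 4

int main(void)
{
	void *pages[NPAGES] = { NULL };

	/* Pretend setup failed after populating two slots. */
	pages[0] = malloc(16);
	pages[1] = malloc(16);

	/* The guard lets one teardown loop serve both the full and the
	 * partially-initialized case. */
	for (int i = 0; i < NPAGES; i++) {
		if (pages[i])
			free(pages[i]);
	}
	return 0;
}

free() happens to tolerate NULL, but unlock_page() and page_cache_release() do not, which is why the kernel hunk needs the explicit guard.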
if (!num_entries)
return 0;
- io_ctl_init(&io_ctl, inode, root);
+ ret = io_ctl_init(&io_ctl, inode, root);
+ if (ret)
+ return ret;
+
ret = readahead_cache(inode);
if (ret)
goto out;
struct io_ctl io_ctl;
struct list_head bitmap_list;
struct btrfs_key key;
- u64 start, end, len;
+ u64 start, extent_start, extent_end, len;
int entries = 0;
int bitmaps = 0;
int ret;
if (!i_size_read(inode))
return -1;
- io_ctl_init(&io_ctl, inode, root);
+ ret = io_ctl_init(&io_ctl, inode, root);
+ if (ret)
+ return -1;
/* Get the cluster for this block_group if it exists */
if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
- /*
- * We shouldn't have switched the pinned extents yet so this is the
- * right one
- */
- unpin = root->fs_info->pinned_extents;
-
/* Lock all pages first so we can lock the extent safely. */
io_ctl_prepare_pages(&io_ctl, inode, 0);
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
0, &cached_state, GFP_NOFS);
- /*
- * When searching for pinned extents, we need to start at our start
- * offset.
- */
- if (block_group)
- start = block_group->key.objectid;
-
node = rb_first(&ctl->free_space_offset);
if (!node && cluster) {
node = rb_first(&cluster->root);
	/*
	 * We want to add any pinned extents to our free space cache
* so we don't leak the space
*/
+
+ /*
+ * We shouldn't have switched the pinned extents yet so this is the
+ * right one
+ */
+ unpin = root->fs_info->pinned_extents;
+
+ if (block_group)
+ start = block_group->key.objectid;
+
while (block_group && (start < block_group->key.objectid +
block_group->key.offset)) {
- ret = find_first_extent_bit(unpin, start, &start, &end,
+ ret = find_first_extent_bit(unpin, start,
+ &extent_start, &extent_end,
EXTENT_DIRTY);
if (ret) {
			ret = 0;
			break;
		}
/* This pinned extent is out of our range */
- if (start >= block_group->key.objectid +
+ if (extent_start >= block_group->key.objectid +
block_group->key.offset)
break;
- len = block_group->key.objectid +
- block_group->key.offset - start;
- len = min(len, end + 1 - start);
+ extent_start = max(extent_start, start);
+ extent_end = min(block_group->key.objectid +
+ block_group->key.offset, extent_end + 1);
+ len = extent_end - extent_start;
entries++;
- ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+ ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
if (ret)
goto out_nospc;
- start = end + 1;
+ start = extent_end;
}
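
The rewritten loop clamps each pinned extent to the block group before emitting a cache entry: find_first_extent_bit() reports an inclusive [extent_start, extent_end] range that may begin before the block group, and the old code trusted the reported start. A standalone sketch of the arithmetic, with made-up offsets:

#include <stdint.h>
#include <stdio.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* Block group covers [1 MiB, 2 MiB). */
	uint64_t start = 1048576, bg_end = 2097152;
	/* A pinned extent with an inclusive end, straddling the block
	 * group start: [768 KiB, 1.25 MiB - 1]. */
	uint64_t extent_start = 786432, extent_end = 1310719;

	extent_start = max_u64(extent_start, start);	/* clamp left */
	uint64_t excl_end = min_u64(bg_end, extent_end + 1); /* clamp right */
	uint64_t len = excl_end - extent_start;

	printf("entry: start=%llu len=%llu, next search at %llu\n",
	       (unsigned long long)extent_start,
	       (unsigned long long)len,
	       (unsigned long long)excl_end);
	/* Prints: entry: start=1048576 len=262144, next search at 1310720 */
	return 0;
}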
/* Write out the bitmaps */
cluster->block_group = NULL;
}
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
- u64 *trimmed, u64 start, u64 end, u64 minlen)
+static int do_trimming(struct btrfs_block_group_cache *block_group,
+ u64 *total_trimmed, u64 start, u64 bytes,
+ u64 reserved_start, u64 reserved_bytes)
{
- struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- struct btrfs_free_space *entry = NULL;
+ struct btrfs_space_info *space_info = block_group->space_info;
struct btrfs_fs_info *fs_info = block_group->fs_info;
- u64 bytes = 0;
- u64 actually_trimmed;
- int ret = 0;
+ int ret;
+ int update = 0;
+ u64 trimmed = 0;
- *trimmed = 0;
+ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+ if (!block_group->ro) {
+ block_group->reserved += reserved_bytes;
+ space_info->bytes_reserved += reserved_bytes;
+ update = 1;
+ }
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+
+ ret = btrfs_error_discard_extent(fs_info->extent_root,
+ start, bytes, &trimmed);
+ if (!ret)
+ *total_trimmed += trimmed;
+
+ btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
+
+ if (update) {
+ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+ if (block_group->ro)
+ space_info->bytes_readonly += reserved_bytes;
+ block_group->reserved -= reserved_bytes;
+ space_info->bytes_reserved -= reserved_bytes;
+ spin_unlock(&space_info->lock);
+ spin_unlock(&block_group->lock);
+ }
+
+ return ret;
+}
+
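do_trimming() separates the range that is discarded (start/bytes) from the free-space entry the caller unlinked (reserved_start/reserved_bytes): the bytes are parked as reserved so the allocator cannot hand them out mid-discard, and afterwards the whole reserved range goes back to the free-space cache whether or not the discard succeeded. A compressed userspace model of that bookkeeping, with plain integers standing in for the spinlock-protected counters (all names invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_space_info { uint64_t bytes_reserved, bytes_readonly; };
struct toy_block_group { int ro; uint64_t reserved, free; };

/* Stand-in for the discard call: report everything as trimmed. */
static int toy_discard(uint64_t start, uint64_t bytes, uint64_t *trimmed)
{
	(void)start;
	*trimmed = bytes;
	return 0;
}

static int toy_do_trimming(struct toy_block_group *bg,
			   struct toy_space_info *si, uint64_t *total,
			   uint64_t start, uint64_t bytes,
			   uint64_t rsv_start, uint64_t rsv_bytes)
{
	uint64_t trimmed = 0;
	int update = 0, ret;

	/* Park the unlinked entry as reserved while the discard runs. */
	if (!bg->ro) {
		bg->reserved += rsv_bytes;
		si->bytes_reserved += rsv_bytes;
		update = 1;
	}

	ret = toy_discard(start, bytes, &trimmed);
	if (!ret)
		*total += trimmed;

	/* The whole entry returns to the free-space cache either way. */
	bg->free += rsv_bytes;
	(void)rsv_start;

	if (update) {
		if (bg->ro)	/* group went read-only meanwhile */
			si->bytes_readonly += rsv_bytes;
		bg->reserved -= rsv_bytes;
		si->bytes_reserved -= rsv_bytes;
	}
	return ret;
}

int main(void)
{
	struct toy_space_info si = { 0 };
	struct toy_block_group bg = { 0 };
	uint64_t total = 0;

	/* Discard 4 KiB out of a 16 KiB entry that starts at 1 MiB. */
	toy_do_trimming(&bg, &si, &total, 1 << 20, 4096, 1 << 20, 16384);
	assert(si.bytes_reserved == 0 && bg.reserved == 0);
	printf("trimmed %llu, back on free list %llu\n",
	       (unsigned long long)total, (unsigned long long)bg.free);
	return 0;
}
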
+static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+ u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct btrfs_free_space *entry;
+ struct rb_node *node;
+ int ret = 0;
+ u64 extent_start;
+ u64 extent_bytes;
+ u64 bytes;
while (start < end) {
spin_lock(&ctl->tree_lock);
		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}
entry = tree_search_offset(ctl, start, 0, 1);
- if (!entry)
- entry = tree_search_offset(ctl,
- offset_to_bitmap(ctl, start),
- 1, 1);
-
- if (!entry || entry->offset >= end) {
+ if (!entry) {
spin_unlock(&ctl->tree_lock);
break;
}
- if (entry->bitmap) {
- ret = search_bitmap(ctl, entry, &start, &bytes);
- if (!ret) {
- if (start >= end) {
- spin_unlock(&ctl->tree_lock);
- break;
- }
- bytes = min(bytes, end - start);
- bitmap_clear_bits(ctl, entry, start, bytes);
- if (entry->bytes == 0)
- free_bitmap(ctl, entry);
- } else {
- start = entry->offset + BITS_PER_BITMAP *
- block_group->sectorsize;
+ /* skip bitmaps */
+ while (entry->bitmap) {
+ node = rb_next(&entry->offset_index);
+ if (!node) {
spin_unlock(&ctl->tree_lock);
- ret = 0;
- continue;
+ goto out;
}
- } else {
- start = entry->offset;
- bytes = min(entry->bytes, end - start);
- unlink_free_space(ctl, entry);
- kmem_cache_free(btrfs_free_space_cachep, entry);
+ entry = rb_entry(node, struct btrfs_free_space,
+ offset_index);
+ }
+
+ if (entry->offset >= end) {
+ spin_unlock(&ctl->tree_lock);
+ break;
}
+ extent_start = entry->offset;
+ extent_bytes = entry->bytes;
+ start = max(start, extent_start);
+ bytes = min(extent_start + extent_bytes, end) - start;
+ if (bytes < minlen) {
+ spin_unlock(&ctl->tree_lock);
+ goto next;
+ }
+
+ unlink_free_space(ctl, entry);
+ kmem_cache_free(btrfs_free_space_cachep, entry);
+
spin_unlock(&ctl->tree_lock);
- if (bytes >= minlen) {
- struct btrfs_space_info *space_info;
- int update = 0;
-
- space_info = block_group->space_info;
- spin_lock(&space_info->lock);
- spin_lock(&block_group->lock);
- if (!block_group->ro) {
- block_group->reserved += bytes;
- space_info->bytes_reserved += bytes;
- update = 1;
- }
- spin_unlock(&block_group->lock);
- spin_unlock(&space_info->lock);
-
- ret = btrfs_error_discard_extent(fs_info->extent_root,
- start,
- bytes,
- &actually_trimmed);
-
- btrfs_add_free_space(block_group, start, bytes);
- if (update) {
- spin_lock(&space_info->lock);
- spin_lock(&block_group->lock);
- if (block_group->ro)
- space_info->bytes_readonly += bytes;
- block_group->reserved -= bytes;
- space_info->bytes_reserved -= bytes;
- spin_unlock(&space_info->lock);
- spin_unlock(&block_group->lock);
- }
+ ret = do_trimming(block_group, total_trimmed, start, bytes,
+ extent_start, extent_bytes);
+ if (ret)
+ break;
+next:
+ start += bytes;
- if (ret)
- break;
- *trimmed += actually_trimmed;
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+ }
+out:
+ return ret;
+}
+
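trim_no_bitmap() intersects the trim request with each extent entry before handing it to do_trimming(); the entry itself is always unlinked and re-added whole. A worked example of the range math with made-up offsets:

#include <stdint.h>
#include <stdio.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* Trim request [6 MiB, 9 MiB); free-space entry [5 MiB, 8 MiB). */
	uint64_t start = 6 << 20, end = 9 << 20;
	uint64_t extent_start = 5 << 20, extent_bytes = 3 << 20;

	start = max_u64(start, extent_start);
	uint64_t bytes = min_u64(extent_start + extent_bytes, end) - start;

	/* do_trimming() discards [6 MiB, 8 MiB) but re-adds the whole
	 * [5 MiB, 8 MiB) entry afterwards. */
	printf("discard %llu bytes at %llu; re-add %llu bytes at %llu\n",
	       (unsigned long long)bytes, (unsigned long long)start,
	       (unsigned long long)extent_bytes,
	       (unsigned long long)extent_start);
	return 0;
}
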
+static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+ u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct btrfs_free_space *entry;
+ int ret = 0;
+ int ret2;
+ u64 bytes;
+ u64 offset = offset_to_bitmap(ctl, start);
+
+ while (offset < end) {
+ bool next_bitmap = false;
+
+ spin_lock(&ctl->tree_lock);
+
+ if (ctl->free_space < minlen) {
+ spin_unlock(&ctl->tree_lock);
+ break;
+ }
+
+ entry = tree_search_offset(ctl, offset, 1, 0);
+ if (!entry) {
+ spin_unlock(&ctl->tree_lock);
+ next_bitmap = true;
+ goto next;
+ }
+
+ bytes = minlen;
+ ret2 = search_bitmap(ctl, entry, &start, &bytes);
+ if (ret2 || start >= end) {
+ spin_unlock(&ctl->tree_lock);
+ next_bitmap = true;
+ goto next;
+ }
+
+ bytes = min(bytes, end - start);
+ if (bytes < minlen) {
+ spin_unlock(&ctl->tree_lock);
+ goto next;
+ }
+
+ bitmap_clear_bits(ctl, entry, start, bytes);
+ if (entry->bytes == 0)
+ free_bitmap(ctl, entry);
+
+ spin_unlock(&ctl->tree_lock);
+
+ ret = do_trimming(block_group, total_trimmed, start, bytes,
+ start, bytes);
+ if (ret)
+ break;
+next:
+ if (next_bitmap) {
+ offset += BITS_PER_BITMAP * ctl->unit;
+ } else {
+ start += bytes;
+ if (start >= offset + BITS_PER_BITMAP * ctl->unit)
+ offset += BITS_PER_BITMAP * ctl->unit;
}
- start += bytes;
- bytes = 0;
if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

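trim_bitmaps() walks the disk in fixed windows: offset_to_bitmap() rounds the requested start down to the bitmap that covers it, and the loop advances either by whole windows (no bitmap, nothing found) or bit by bit inside one until start crosses into the next window. A sketch of the window arithmetic, assuming 4 KiB pages and sectors so that BITS_PER_BITMAP is PAGE_CACHE_SIZE * 8 = 32768 (this era's definition) and each bitmap covers 128 MiB:

#include <stdint.h>
#include <stdio.h>

#define UNIT		4096ULL			/* ctl->unit: one sector */
#define BITS_PER_BITMAP	(4096ULL * 8)		/* one page of bits */
#define WINDOW		(BITS_PER_BITMAP * UNIT) /* 128 MiB of disk */

/* Round down to the covering bitmap, as offset_to_bitmap() does. */
static uint64_t to_bitmap(uint64_t offset)
{
	return offset - (offset % WINDOW);
}

int main(void)
{
	uint64_t start = 200ULL << 20;		/* trim from 200 MiB... */
	uint64_t offset = to_bitmap(start);	/* ...window base: 128 MiB */

	printf("start %llu falls in window [%llu, %llu)\n",
	       (unsigned long long)start,
	       (unsigned long long)offset,
	       (unsigned long long)(offset + WINDOW));

	/* Nothing trimmable here: jump a whole window, to 256 MiB. */
	offset += WINDOW;
	printf("next window at %llu\n", (unsigned long long)offset);
	return 0;
}
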
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+ u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+ int ret;
+
+ *trimmed = 0;
+
+ ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+ if (ret)
+ return ret;
+
+ ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+
+ return ret;
+}
+
/*
* Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 */
/*
* find_free_dev_extent - find free space in the specified device
- * @trans: transaction handler
* @device: the device which we search the free space in
* @num_bytes: the size of the free space that we need
* @start: store the start of the free space.
* But if we don't find suitable free space, it is used to store the size of
* the max free space.
*/
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *len)
{
struct btrfs_key key;
key.offset = search_start;
key.type = BTRFS_DEV_EXTENT_KEY;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
/*
 * does all the dirty work required for changing the file system's UUID.
*/
-static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_root *root)
{
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
- ret = btrfs_prepare_sprout(trans, root);
+ ret = btrfs_prepare_sprout(root);
BUG_ON(ret);
}
return ret;
}
-static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
{
if (total_avail == 0)
continue;
- ret = find_free_dev_extent(trans, device,
+ ret = find_free_dev_extent(device,
max_stripe_size * dev_stripes,
&dev_offset, &max_avail);
if (ret && ret != -ENOSPC)
BUG_ON(ret);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
- ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
+ ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
item_size);
BUG_ON(ret);
}
u64 stripe_nr;
u64 stripe_nr_orig;
u64 stripe_nr_end;
- int stripes_allocated = 8;
- int stripes_required = 1;
int stripe_index;
int i;
+ int ret = 0;
int num_stripes;
int max_errors = 0;
struct btrfs_bio *bbio = NULL;
- if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
- stripes_allocated = 1;
-again:
- if (bbio_ret) {
- bbio = kzalloc(btrfs_bio_size(stripes_allocated),
- GFP_NOFS);
- if (!bbio)
- return -ENOMEM;
-
- atomic_set(&bbio->error, 0);
- }
-
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
if (mirror_num > map->num_stripes)
mirror_num = 0;
- /* if our btrfs_bio struct is too small, back off and try again */
- if (rw & REQ_WRITE) {
- if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_DUP)) {
- stripes_required = map->num_stripes;
- max_errors = 1;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
- stripes_required = map->sub_stripes;
- max_errors = 1;
- }
- }
- if (rw & REQ_DISCARD) {
- if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK)
- stripes_required = map->num_stripes;
- }
- if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
- stripes_allocated < stripes_required) {
- stripes_allocated = map->num_stripes;
- free_extent_map(em);
- kfree(bbio);
- goto again;
- }
stripe_nr = offset;
/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
}
BUG_ON(stripe_index >= map->num_stripes);
+ bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
+ if (!bbio) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ atomic_set(&bbio->error, 0);
+
if (rw & REQ_DISCARD) {
+ int factor = 0;
+ int sub_stripes = 0;
+ u64 stripes_per_dev = 0;
+ u32 remaining_stripes = 0;
+
+ if (map->type &
+ (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+ sub_stripes = 1;
+ else
+ sub_stripes = map->sub_stripes;
+
+ factor = map->num_stripes / sub_stripes;
+ stripes_per_dev = div_u64_rem(stripe_nr_end -
+ stripe_nr_orig,
+ factor,
+ &remaining_stripes);
+ }
+
for (i = 0; i < num_stripes; i++) {
bbio->stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
bbio->stripes[i].dev = map->stripes[stripe_index].dev;
- if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- u64 stripes;
- u32 last_stripe = 0;
- int j;
-
- div_u64_rem(stripe_nr_end - 1,
- map->num_stripes,
- &last_stripe);
-
- for (j = 0; j < map->num_stripes; j++) {
- u32 test;
-
- div_u64_rem(stripe_nr_end - 1 - j,
- map->num_stripes, &test);
- if (test == stripe_index)
- break;
- }
- stripes = stripe_nr_end - 1 - j;
- do_div(stripes, map->num_stripes);
- bbio->stripes[i].length = map->stripe_len *
- (stripes - stripe_nr + 1);
-
- if (i == 0) {
- bbio->stripes[i].length -=
- stripe_offset;
- stripe_offset = 0;
- }
- if (stripe_index == last_stripe)
- bbio->stripes[i].length -=
- stripe_end_offset;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
- u64 stripes;
- int j;
- int factor = map->num_stripes /
- map->sub_stripes;
- u32 last_stripe = 0;
-
- div_u64_rem(stripe_nr_end - 1,
- factor, &last_stripe);
- last_stripe *= map->sub_stripes;
-
- for (j = 0; j < factor; j++) {
- u32 test;
-
- div_u64_rem(stripe_nr_end - 1 - j,
- factor, &test);
-
- if (test ==
- stripe_index / map->sub_stripes)
- break;
- }
- stripes = stripe_nr_end - 1 - j;
- do_div(stripes, factor);
- bbio->stripes[i].length = map->stripe_len *
- (stripes - stripe_nr + 1);
-
- if (i < map->sub_stripes) {
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ bbio->stripes[i].length = stripes_per_dev *
+ map->stripe_len;
+ if (i / sub_stripes < remaining_stripes)
+ bbio->stripes[i].length +=
+ map->stripe_len;
+ if (i < sub_stripes)
bbio->stripes[i].length -=
stripe_offset;
- if (i == map->sub_stripes - 1)
- stripe_offset = 0;
- }
- if (stripe_index >= last_stripe &&
- stripe_index <= (last_stripe +
- map->sub_stripes - 1)) {
+				if ((i / sub_stripes + 1) %
+				    factor == remaining_stripes)
bbio->stripes[i].length -=
stripe_end_offset;
- }
+ if (i == sub_stripes - 1)
+ stripe_offset = 0;
} else
bbio->stripes[i].length = *length;
stripe_index++;
}
}
- if (bbio_ret) {
- *bbio_ret = bbio;
- bbio->num_stripes = num_stripes;
- bbio->max_errors = max_errors;
- bbio->mirror_num = mirror_num;
+
+ if (rw & REQ_WRITE) {
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_DUP)) {
+ max_errors = 1;
+ }
}
+
+ *bbio_ret = bbio;
+ bbio->num_stripes = num_stripes;
+ bbio->max_errors = max_errors;
+ bbio->mirror_num = mirror_num;
out:
free_extent_map(em);
- return 0;
+ return ret;
}
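
The per-device discard lengths now come from a single div_u64_rem(): every device group gets stripes_per_dev full stripes, the first remaining_stripes groups get one extra, and the first and last groups lose stripe_offset and stripe_end_offset respectively. A standalone re-run of that arithmetic (using the factor-based modulo from the hunk above) with a made-up three-disk RAID0 layout, 64 KiB stripes, and a discard spanning stripes 0-6 that starts 16 KiB in and stops 48 KiB short:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's div_u64_rem(). */
static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
			    uint32_t *remainder)
{
	*remainder = (uint32_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	const uint64_t stripe_len = 65536;		/* 64 KiB stripes */
	const int num_stripes = 3, sub_stripes = 1;	/* RAID0, 3 disks */
	const int factor = num_stripes / sub_stripes;
	const uint64_t stripe_nr_orig = 0, stripe_nr_end = 7;
	const uint64_t stripe_offset = 16384;	/* discard starts 16K in */
	const uint64_t stripe_end_offset = 49152; /* and stops 48K short */
	uint32_t remaining_stripes;
	const uint64_t stripes_per_dev =
		div_u64_rem(stripe_nr_end - stripe_nr_orig, factor,
			    &remaining_stripes);

	for (int i = 0; i < num_stripes; i++) {
		uint64_t len = stripes_per_dev * stripe_len;

		if (i / sub_stripes < (int)remaining_stripes)
			len += stripe_len;	/* an extra full stripe */
		if (i < sub_stripes)
			len -= stripe_offset;	/* short first stripe */
		if ((i / sub_stripes + 1) % factor == (int)remaining_stripes)
			len -= stripe_end_offset; /* short last stripe */
		printf("dev %d: %llu bytes\n", i, (unsigned long long)len);
	}
	/* dev 0 holds stripes 0, 3, 6: (64K-16K) + 64K + (64K-48K) = 128K;
	 * devs 1 and 2 each hold two full stripes: 128K. All print 131072. */
	return 0;
}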
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
struct btrfs_fs_devices *fs_devices;
int ret;
- mutex_lock(&uuid_mutex);
+ BUG_ON(!mutex_is_locked(&uuid_mutex));
fs_devices = root->fs_info->fs_devices->seed;
while (fs_devices) {
fs_devices->seed = root->fs_info->fs_devices->seed;
root->fs_info->fs_devices->seed = fs_devices;
out:
- mutex_unlock(&uuid_mutex);
return ret;
}
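
open_seed_devices() no longer takes uuid_mutex itself; it asserts that the caller already holds it, and btrfs_read_chunk_tree (below) now takes both uuid_mutex and the chunk lock around the whole scan. A userspace analogue of that "caller holds the lock" contract, tracking ownership by hand since POSIX mutexes cannot be queried (all names invented; build with -pthread):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct owned_mutex {
	pthread_mutex_t lock;
	pthread_t owner;
	int held;
};

static void om_lock(struct owned_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->held = 1;
}

static void om_unlock(struct owned_mutex *m)
{
	m->held = 0;
	pthread_mutex_unlock(&m->lock);
}

/* Callee: assert the contract instead of re-acquiring the lock. */
static void open_seed_devices_like(struct owned_mutex *uuid_mutex)
{
	assert(uuid_mutex->held &&
	       pthread_equal(uuid_mutex->owner, pthread_self()));
	puts("seed devices scanned under the caller's lock");
}

int main(void)
{
	struct owned_mutex uuid_mutex = { PTHREAD_MUTEX_INITIALIZER };

	om_lock(&uuid_mutex);			/* take the lock once... */
	open_seed_devices_like(&uuid_mutex);	/* ...for the whole scan */
	om_unlock(&uuid_mutex);
	return 0;
}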
if (!path)
return -ENOMEM;
+ mutex_lock(&uuid_mutex);
+ lock_chunks(root);
+
/* first we search for all of the device items, and then we
* read in all of the chunk items. This way we can create chunk
 * mappings that reference all of the devices that are found
}
ret = 0;
error:
+ unlock_chunks(root);
+ mutex_unlock(&uuid_mutex);
+
btrfs_free_path(path);
return ret;
}