static void unlock_chunks(struct btrfs_root *root)
{
- mutex_unlock(&root->fs_info->alloc_mutex);
mutex_unlock(&root->fs_info->chunk_mutex);
+ mutex_unlock(&root->fs_info->alloc_mutex);
}
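
The reordering above makes the unlocks mirror the lock acquisitions in
reverse (LIFO) order. lock_chunks() is not part of this hunk, so the
acquisition order shown below is an assumption inferred from the fix; a
minimal userspace sketch of the symmetric pairing:

    #include <pthread.h>

    /* Sketch only: assumes the (unshown) lock side takes alloc_mutex
     * first, then chunk_mutex.  Releasing in reverse keeps the
     * lock/unlock pairs symmetric. */
    static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void lock_chunks(void)
    {
        pthread_mutex_lock(&alloc_mutex);    /* outer lock first */
        pthread_mutex_lock(&chunk_mutex);    /* then inner lock */
    }

    static void unlock_chunks(void)
    {
        pthread_mutex_unlock(&chunk_mutex);  /* inner lock first */
        pthread_mutex_unlock(&alloc_mutex);  /* then outer lock */
    }
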
@@ ... @@ int btrfs_cleanup_fs_uuids(void)
return 0;
}
-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
- u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+ u64 devid, u8 *uuid)
{
struct btrfs_device *dev;
struct list_head *cur;
return NULL;
}
-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
struct list_head *cur;
struct btrfs_fs_devices *fs_devices;
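
The noinline annotations added throughout this patch force a real call,
so each helper keeps its own stack frame instead of having it merged
into an already deep caller by gcc. A stand-alone illustration (the
function below is hypothetical, not from the patch; in the kernel the
bare "noinline" keyword expands to this attribute):

    #include <stdio.h>

    /* hypothetical example: the 128-byte buffer lives only in sum()'s
     * frame and is never folded into the caller's stack usage */
    static __attribute__((noinline)) int sum(const int *v, int n)
    {
        int buf[32];
        int i, total = 0;

        for (i = 0; i < n && i < 32; i++) {
            buf[i] = v[i];
            total += buf[i];
        }
        return total;
    }

    int main(void)
    {
        int v[] = { 1, 2, 3 };

        printf("%d\n", sum(v, 3));
        return 0;
    }
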
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
-int run_scheduled_bios(struct btrfs_device *device)
+static noinline int run_scheduled_bios(struct btrfs_device *device)
{
struct bio *pending;
struct backing_dev_info *bdi;
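
Per the comment above, run_scheduled_bios() puts the device back on the
worker's list when its block device is congested rather than draining
it to completion. A toy userspace model of that requeue-on-congestion
loop; every name here is a stand-in, not the btrfs API:

    #include <stdbool.h>

    /* stand-in for a device with queued IO and a congestion probe */
    struct dev_ctx {
        int pending;                 /* units of IO still queued */
        bool (*congested)(void);     /* models the bdi congestion check */
    };

    /* drain one device, but yield early when it is congested so a
     * single worker thread can service the other devices too */
    static void run_pending(struct dev_ctx *dev,
                            void (*requeue)(struct dev_ctx *))
    {
        while (dev->pending > 0) {
            if (dev->congested()) {
                requeue(dev);        /* go to the back of the list */
                return;              /* let other devices progress */
            }
            dev->pending--;          /* "submit" one unit of IO */
        }
    }
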
run_scheduled_bios(device);
}
-static int device_list_add(const char *path,
+static noinline int device_list_add(const char *path,
struct btrfs_super_block *disk_super,
u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
* called very infrequently and that a given device has a small number
* of extents
*/
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device,
- struct btrfs_path *path,
- u64 num_bytes, u64 *start)
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device,
+ struct btrfs_path *path,
+ u64 num_bytes, u64 *start)
{
struct btrfs_key key;
struct btrfs_root *root = device->dev_root;
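
The comment before find_free_dev_extent() is what licenses a simple
linear scan: the call is rare and a device holds few extents. A
self-contained sketch of that style of hole search, assuming a sorted,
non-overlapping extent array (stand-in types, not the on-disk
btrfs_dev_extent items):

    #include <stdint.h>

    struct extent {
        uint64_t start;
        uint64_t len;
    };

    /* extents[] must be sorted by start and non-overlapping; returns 0
     * and sets *found to the first gap of at least num_bytes, or -1 if
     * no such hole exists below dev_size */
    static int find_hole(const struct extent *extents, int n,
                         uint64_t dev_size, uint64_t num_bytes,
                         uint64_t *found)
    {
        uint64_t cursor = 0;
        int i;

        for (i = 0; i < n; i++) {
            if (extents[i].start - cursor >= num_bytes) {
                *found = cursor;
                return 0;
            }
            cursor = extents[i].start + extents[i].len;
        }
        if (dev_size - cursor >= num_bytes) {
            *found = cursor;
            return 0;
        }
        return -1;
    }
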
return ret;
}
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+noinline int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset,
return ret;
}
-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+ u64 objectid, u64 *offset)
{
struct btrfs_path *path;
int ret;
return ret;
}
-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
- u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root,
+ struct btrfs_path *path, u64 *objectid)
{
int ret;
struct btrfs_key key;
return -EIO;
}
+ filemap_write_and_wait(bdev->bd_inode->i_mapping);
mutex_lock(&root->fs_info->volume_mutex);
trans = btrfs_start_transaction(root, 1);
if (ret)
goto out_close_bdev;
+ set_blocksize(device->bdev, 4096);
+
total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
btrfs_set_super_total_bytes(&root->fs_info->super_copy,
total_bytes + device->total_bytes);
goto out;
}
-int btrfs_update_device(struct btrfs_trans_handle *trans,
- struct btrfs_device *device)
+noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
em_tree = &root->fs_info->mapping_tree.map_tree;
/* step one, relocate all the extents inside this chunk */
- ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
+ ret = btrfs_relocate_block_group(extent_root, chunk_offset);
BUG_ON(ret);
trans = btrfs_start_transaction(root, 1);
BUG_ON(ret);
}
+ ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
+ BUG_ON(ret);
+
spin_lock(&em_tree->lock);
remove_extent_mapping(em_tree, em);
+ spin_unlock(&em_tree->lock);
+
kfree(map);
em->bdev = NULL;
/* once for the tree */
free_extent_map(em);
- spin_unlock(&em_tree->lock);
-
/* once for us */
free_extent_map(em);
return 0;
}
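
The reshuffled hunk above narrows the em_tree spinlock to the
remove_extent_mapping() call itself, so neither kfree() nor the two
free_extent_map() puts run under the lock. "Once for the tree, once
for us" is the usual two-reference teardown; a toy model of that
refcount discipline (stand-in types, single-threaded):

    #include <assert.h>

    /* the mapping tree holds one reference, the lookup that found the
     * mapping holds another */
    struct em {
        int refs;
    };

    static void em_put(struct em *em)
    {
        assert(em->refs > 0);
        if (--em->refs == 0)
            /* the real code frees the mapping here */ ;
    }

    static void drop_from_tree(struct em *em)
    {
        /* lock; unlink em from the tree; unlock; only then drop refs */
        em_put(em);    /* once for the tree */
        em_put(em);    /* once for us (the lookup reference) */
    }
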
-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
- int sub_stripes)
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
+ int num_stripes, int sub_stripes)
{
if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
return calc_size;
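
For RAID1 and DUP every stripe carries a full copy of the data, so the
logical chunk size is just calc_size. The remaining branches fall
outside this hunk; the striped cases sketched below (scaling by the
stripe count, and dividing by sub_stripes for RAID10-style layouts) are
an assumption, not a quote of the function:

    #include <stdint.h>

    /* hedged sketch of the size-by-profile math */
    static uint64_t chunk_bytes(int mirrored, uint64_t calc_size,
                                int num_stripes, int sub_stripes)
    {
        if (mirrored)            /* RAID1 / DUP: copies, not stripes */
            return calc_size;
        if (sub_stripes > 1)     /* RAID10-style: mirrored stripes */
            return calc_size * (num_stripes / sub_stripes);
        return calc_size * num_stripes;    /* RAID0 / single */
    }
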
else
min_free = calc_size;
- /* we add 1MB because we never use the first 1MB of the device */
- min_free += 1024 * 1024;
+ /*
+ * we add 1MB because we never use the first 1MB of the device.
+ * If we've looped, we are likely allocating the maximum amount
+ * of space left already, so skip the extra 1MB.
+ */
+ if (!looped)
+ min_free += 1024 * 1024;
/* build a private list of devices we will allocate from */
while(index < num_stripes) {
}
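
The looped flag is set by the allocator's retry path, which is outside
this hunk: on the first pass the allocator keeps clear of the 1MB at
the start of each device that btrfs never allocates, and once it has
looped back with a reduced target it drops that padding so the tail of
a nearly full device can still be used. A minimal sketch of the rule:

    #include <stdbool.h>
    #include <stdint.h>

    /* how much free space a device must have to be usable for one
     * stripe of the chunk being allocated */
    static uint64_t min_free_bytes(uint64_t calc_size, bool looped)
    {
        uint64_t min_free = calc_size;

        if (!looped)
            min_free += 1024 * 1024;    /* skip the reserved first 1MB */
        return min_free;
    }
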
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
-#else
-static int end_bio_multi_stripe(struct bio *bio,
- unsigned int bytes_done, int err)
-#endif
{
struct btrfs_multi_bio *multi = bio->bi_private;
int is_orig_bio = 0;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- if (bio->bi_size)
- return 1;
-#endif
if (err)
atomic_inc(&multi->error);
}
kfree(multi);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- bio_endio(bio, bio->bi_size, err);
-#else
bio_endio(bio, err);
-#endif
} else if (!is_orig_bio) {
bio_put(bio);
}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- return 0;
-#endif
}
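
With the version guards removed, the file targets the 2.6.24+
completion protocol only: bio_endio() takes (bio, err) and the end_io
callback fires exactly once per bio, which is why the old
partial-completion guard on bi_size could be deleted. A userspace model
of a multi-stripe completion counter in that style (stand-in types,
single-threaded, no atomics):

    /* toy bio: the callback runs once, with the final error code */
    struct toy_bio {
        void *bi_private;
        void (*bi_end_io)(struct toy_bio *bio, int err);
    };

    /* one of these tracks all the stripe bios cloned from an original */
    struct toy_multi {
        int errors;
        int remaining;
    };

    static void end_toy_stripe(struct toy_bio *bio, int err)
    {
        struct toy_multi *multi = bio->bi_private;

        if (err)
            multi->errors++;             /* count failed stripes */
        if (--multi->remaining == 0)
            /* last stripe: end the original bio here */ ;
    }
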
struct async_sched {
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
- int rw, struct bio *bio)
+static noinline int schedule_bio(struct btrfs_root *root,
+ struct btrfs_device *device,
+ int rw, struct bio *bio)
{
int should_queue = 1;
} else {
bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
bio->bi_sector = logical >> 9;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
- bio_endio(bio, bio->bi_size, -EIO);
-#else
bio_endio(bio, -EIO);
-#endif
}
dev_nr++;
}
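
schedule_bio(), per its comment, appends one bio to the device's
pending list and makes sure the work struct is scheduled, kicking the
worker only when it is not already queued. A single-threaded toy model
of that pattern (stand-in types, no locking shown):

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_bio {
        struct toy_bio *next;
    };

    struct toy_device {
        struct toy_bio *pending_head;
        struct toy_bio *pending_tail;
        bool work_scheduled;
    };

    /* append one bio; kick the worker only if the list was idle */
    static void schedule_toy_bio(struct toy_device *dev, struct toy_bio *bio,
                                 void (*kick)(struct toy_device *))
    {
        bio->next = NULL;
        if (dev->pending_tail)
            dev->pending_tail->next = bio;
        else
            dev->pending_head = bio;
        dev->pending_tail = bio;

        if (!dev->work_scheduled) {
            dev->work_scheduled = true;
            kick(dev);    /* later runs run_scheduled_bios() */
        }
    }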