/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
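
/*
 * For example, a four stripe RAID10 mapping occupies
 * map_lookup_size(4) == sizeof(struct map_lookup) +
 * 4 * sizeof(struct btrfs_bio_stripe) bytes; the stripes[] flexible
 * array is allocated inline at the end of the struct.
 */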
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
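
/*
 * fs_uuids is the global registry of every btrfs filesystem (keyed
 * by fsid) that device scanning has seen so far; uuid_mutex protects
 * both this list and the per-fs device lists hanging off of it.
 */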
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->alloc_mutex);
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->alloc_mutex);
	mutex_unlock(&root->fs_info->chunk_mutex);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
				fs_devices->open_devices--;
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run = 0;
	unsigned long limit;

	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;
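
	/*
	 * limit is derived from btrfs_async_submit_limit(); running the
	 * wakeup check below at 2/3 of that cap leaves headroom so that
	 * waiters in the async submit path are woken while bios still
	 * remain on the queue, instead of only once it fully drains.
	 */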

loop:
	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (pending) {
		again = 1;
		device->running_pending = 1;
	} else {
		again = 0;
		device->running_pending = 0;
	}
	spin_unlock(&device->io_lock);

	while(pending) {
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
		bio_get(cur);
		submit_bio(cur->bi_rw, cur);
		bio_put(cur);
		num_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi)) {
			struct bio *old_head;

			spin_lock(&device->io_lock);

			old_head = device->pending_bios;
			device->pending_bios = pending;
			if (device->pending_bio_tail)
				tail->bi_next = old_head;
			else
				device->pending_bio_tail = tail;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}
	if (again)
		goto loop;
done:
	return 0;
}
void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
again:
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->in_fs_metadata) {
			struct block_device *bdev;
			list_del(&device->dev_list);
			list_del(&device->dev_alloc_list);
			fs_devices->num_devices--;
			if (device->bdev) {
				bdev = device->bdev;
				fs_devices->open_devices--;
				mutex_unlock(&uuid_mutex);
				close_bdev_excl(bdev);
				mutex_lock(&uuid_mutex);
			}
			kfree(device->name);
			kfree(device);
			goto again;
		}
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			fs_devices->open_devices--;
		}
		device->bdev = NULL;
		device->in_fs_metadata = 0;
	}
	fs_devices->mounted = 0;
	mutex_unlock(&uuid_mutex);
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	u64 transid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	if (fs_devices->mounted)
		goto out;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_excl(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic)))
			goto error_brelse;

		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		transid = btrfs_super_generation(disk_super);
		if (!latest_transid || transid > latest_transid) {
			latest_devid = devid;
			latest_transid = transid;
			latest_bdev = bdev;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		fs_devices->open_devices++;
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_excl(bdev);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->mounted = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device = NULL;
	struct block_device *bdev = NULL;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *cur;
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		list_for_each(cur, devices) {
			tmp = list_entry(cur, struct btrfs_device, dev_list);
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		if (!device) {
			printk("btrfs: no missing devices found to remove\n");
			ret = -ENOENT;
			goto out;
		}
	} else {
		bdev = open_bdev_excl(device_path, 0,
				      root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic))) {
			ret = -ENOENT;
			goto error_brelse;
		}
		if (memcmp(disk_super->fsid, root->fs_info->fsid,
			   BTRFS_FSID_SIZE)) {
			ret = -ENOENT;
			goto error_brelse;
		}
		devid = le64_to_cpu(disk_super->dev_item.devid);
		device = btrfs_find_device(root, devid, NULL);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}
	root->fs_info->fs_devices->num_devices--;
	root->fs_info->fs_devices->open_devices--;

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	if (bh) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
		bh = NULL;
	}

	if (device->bdev) {
		/* one close for the device struct or super_block */
		close_bdev_excl(device->bdev);
	}
	if (bdev) {
		/* one close for us */
		close_bdev_excl(bdev);
	}
	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_excl(bdev);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return -EIO;

	mutex_lock(&root->fs_info->volume_mutex);

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
out:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	goto out;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	device->total_bytes = new_size;
	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk("btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
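
/*
 * A worked example: div_factor(total_bytes, 1) is roughly
 * total_bytes / 10, which is how the "no chunk larger than 10% of
 * the FS" cap in btrfs_alloc_chunk and the per-device shrink target
 * in btrfs_balance below are both computed.
 */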
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	lock_chunks(root);

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
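
/*
 * Illustration of the layout this maintains: sys_chunk_array is a
 * packed sequence of (struct btrfs_disk_key, struct btrfs_chunk)
 * pairs, so appending one system chunk with two stripes grows the
 * array by sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(2)
 * bytes.  btrfs_del_sys_chunk above walks and shrinks the same layout.
 */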
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
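
/*
 * Worked example: with a calc_size of 1GB per stripe, a 4 device
 * RAID0 chunk provides 4GB of logical space, a RAID1 or DUP chunk
 * provides 1GB (every byte is stored twice), and a 4 stripe RAID10
 * chunk with sub_stripes == 2 provides 4 / 2 == 2GB.
 */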
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
			extent_root->fs_info->fs_devices->open_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
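
/*
 * e.g. a two-device RAID1 chunk reports 2 copies and a RAID10 chunk
 * reports sub_stripes (2) copies; read retry paths use this count to
 * bound the mirror_num values they will ask __btrfs_map_block for.
 */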
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu len %Lu\n", logical, *length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);
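
	/*
	 * Worked example for the RAID10 case above: with num_stripes == 4
	 * and sub_stripes == 2 there are factor == 2 mirror pairs.
	 * do_div returns stripe_nr % factor (which pair holds the data)
	 * and leaves stripe_nr == stripe_nr / factor (how far down each
	 * device to seek); multiplying by sub_stripes then points
	 * stripe_index at the first device of the chosen pair, and reads
	 * may add mirror_num - 1 to pick the other copy.
	 */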
	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn) {
					bdi->unplug_io_fn(bdi, unplug_page);
				}
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
		 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);
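
	/*
	 * stripes_pending counts the bios issued below; each completion
	 * drops it in end_bio_multi_stripe, and the last one to finish
	 * hands the original bio back to the caller's end_io.
	 */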
	while(dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
			bio_endio(bio, bio->bi_size, -EIO);
#else
			bio_endio(bio, -EIO);
#endif
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &fs_devices->alloc_list);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);

		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu missing\n", devid);
		device = add_missing_dev(root, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;

	ret = btrfs_open_device(device);
	if (ret)
		kfree(device);

	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}