2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <asm/div64.h>
25 #include "extent_map.h"
27 #include "transaction.h"
28 #include "print-tree.h"
39 struct btrfs_bio_stripe stripes[];
42 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
43 (sizeof(struct btrfs_bio_stripe) * (n)))
45 static DEFINE_MUTEX(uuid_mutex);
46 static LIST_HEAD(fs_uuids);
48 void btrfs_lock_volumes(void)
50 mutex_lock(&uuid_mutex);
53 void btrfs_unlock_volumes(void)
55 mutex_unlock(&uuid_mutex);
58 int btrfs_cleanup_fs_uuids(void)
60 struct btrfs_fs_devices *fs_devices;
61 struct list_head *uuid_cur;
62 struct list_head *devices_cur;
63 struct btrfs_device *dev;
65 list_for_each(uuid_cur, &fs_uuids) {
66 fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
68 while(!list_empty(&fs_devices->devices)) {
69 devices_cur = fs_devices->devices.next;
70 dev = list_entry(devices_cur, struct btrfs_device,
73 close_bdev_excl(dev->bdev);
75 list_del(&dev->dev_list);
83 static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
86 struct btrfs_device *dev;
87 struct list_head *cur;
89 list_for_each(cur, head) {
90 dev = list_entry(cur, struct btrfs_device, dev_list);
91 if (dev->devid == devid &&
92 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
99 static struct btrfs_fs_devices *find_fsid(u8 *fsid)
101 struct list_head *cur;
102 struct btrfs_fs_devices *fs_devices;
104 list_for_each(cur, &fs_uuids) {
105 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
106 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
112 static int device_list_add(const char *path,
113 struct btrfs_super_block *disk_super,
114 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
116 struct btrfs_device *device;
117 struct btrfs_fs_devices *fs_devices;
118 u64 found_transid = btrfs_super_generation(disk_super);
120 fs_devices = find_fsid(disk_super->fsid);
122 fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
125 INIT_LIST_HEAD(&fs_devices->devices);
126 INIT_LIST_HEAD(&fs_devices->alloc_list);
127 list_add(&fs_devices->list, &fs_uuids);
128 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
129 fs_devices->latest_devid = devid;
130 fs_devices->latest_trans = found_transid;
131 fs_devices->num_devices = 0;
134 device = __find_device(&fs_devices->devices, devid,
135 disk_super->dev_item.uuid);
138 device = kzalloc(sizeof(*device), GFP_NOFS);
140 /* we can safely leave the fs_devices entry around */
143 device->devid = devid;
144 memcpy(device->uuid, disk_super->dev_item.uuid,
146 device->barriers = 1;
147 spin_lock_init(&device->io_lock);
148 device->name = kstrdup(path, GFP_NOFS);
153 list_add(&device->dev_list, &fs_devices->devices);
154 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
155 fs_devices->num_devices++;
158 if (found_transid > fs_devices->latest_trans) {
159 fs_devices->latest_devid = devid;
160 fs_devices->latest_trans = found_transid;
162 *fs_devices_ret = fs_devices;
166 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
168 struct list_head *head = &fs_devices->devices;
169 struct list_head *cur;
170 struct btrfs_device *device;
172 mutex_lock(&uuid_mutex);
174 list_for_each(cur, head) {
175 device = list_entry(cur, struct btrfs_device, dev_list);
176 if (!device->in_fs_metadata) {
177 printk("getting rid of extra dev %s\n", device->name);
179 close_bdev_excl(device->bdev);
180 list_del(&device->dev_list);
181 list_del(&device->dev_alloc_list);
182 fs_devices->num_devices--;
188 mutex_unlock(&uuid_mutex);
191 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
193 struct list_head *head = &fs_devices->devices;
194 struct list_head *cur;
195 struct btrfs_device *device;
197 mutex_lock(&uuid_mutex);
198 list_for_each(cur, head) {
199 device = list_entry(cur, struct btrfs_device, dev_list);
201 close_bdev_excl(device->bdev);
204 device->in_fs_metadata = 0;
206 mutex_unlock(&uuid_mutex);
210 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
211 int flags, void *holder)
213 struct block_device *bdev;
214 struct list_head *head = &fs_devices->devices;
215 struct list_head *cur;
216 struct btrfs_device *device;
219 mutex_lock(&uuid_mutex);
220 list_for_each(cur, head) {
221 device = list_entry(cur, struct btrfs_device, dev_list);
228 bdev = open_bdev_excl(device->name, flags, holder);
231 printk("open %s failed\n", device->name);
235 set_blocksize(bdev, 4096);
236 if (device->devid == fs_devices->latest_devid)
237 fs_devices->latest_bdev = bdev;
239 device->in_fs_metadata = 0;
242 mutex_unlock(&uuid_mutex);
245 mutex_unlock(&uuid_mutex);
246 btrfs_close_devices(fs_devices);
250 int btrfs_scan_one_device(const char *path, int flags, void *holder,
251 struct btrfs_fs_devices **fs_devices_ret)
253 struct btrfs_super_block *disk_super;
254 struct block_device *bdev;
255 struct buffer_head *bh;
260 mutex_lock(&uuid_mutex);
262 bdev = open_bdev_excl(path, flags, holder);
269 ret = set_blocksize(bdev, 4096);
272 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
277 disk_super = (struct btrfs_super_block *)bh->b_data;
278 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
279 sizeof(disk_super->magic))) {
283 devid = le64_to_cpu(disk_super->dev_item.devid);
284 transid = btrfs_super_generation(disk_super);
285 if (disk_super->label[0])
286 printk("device label %s ", disk_super->label);
288 /* FIXME, make a real uuid parser */
289 printk("device fsid %llx-%llx ",
290 *(unsigned long long *)disk_super->fsid,
291 *(unsigned long long *)(disk_super->fsid + 8));
293 printk("devid %Lu transid %Lu %s\n", devid, transid, path);
294 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
299 close_bdev_excl(bdev);
301 mutex_unlock(&uuid_mutex);
306 * this uses a pretty simple search, the expectation is that it is
307 * called very infrequently and that a given device has a small number
308 * of extents
309 */
310 static int find_free_dev_extent(struct btrfs_trans_handle *trans,
311 struct btrfs_device *device,
312 struct btrfs_path *path,
313 u64 num_bytes, u64 *start)
315 struct btrfs_key key;
316 struct btrfs_root *root = device->dev_root;
317 struct btrfs_dev_extent *dev_extent = NULL;
320 u64 search_start = 0;
321 u64 search_end = device->total_bytes;
325 struct extent_buffer *l;
330 /* FIXME use last free of some kind */
332 /* we don't want to overwrite the superblock on the drive,
333 * so we make sure to start at an offset of at least 1MB
335 search_start = max((u64)1024 * 1024, search_start);
337 if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
338 search_start = max(root->fs_info->alloc_start, search_start);
340 key.objectid = device->devid;
341 key.offset = search_start;
342 key.type = BTRFS_DEV_EXTENT_KEY;
343 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
346 ret = btrfs_previous_item(root, path, 0, key.type);
350 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
353 slot = path->slots[0];
354 if (slot >= btrfs_header_nritems(l)) {
355 ret = btrfs_next_leaf(root, path);
362 if (search_start >= search_end) {
366 *start = search_start;
370 *start = last_byte > search_start ?
371 last_byte : search_start;
372 if (search_end <= *start) {
378 btrfs_item_key_to_cpu(l, &key, slot);
380 if (key.objectid < device->devid)
383 if (key.objectid > device->devid)
386 if (key.offset >= search_start && key.offset > last_byte &&
388 if (last_byte < search_start)
389 last_byte = search_start;
390 hole_size = key.offset - last_byte;
391 if (key.offset > last_byte &&
392 hole_size >= num_bytes) {
397 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
402 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
403 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
409 /* we have to make sure we didn't find an extent that has already
410 * been allocated by the map tree or the original allocation
412 btrfs_release_path(root, path);
413 BUG_ON(*start < search_start);
415 if (*start + num_bytes > search_end) {
419 /* check for pending inserts here */
423 btrfs_release_path(root, path);
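/*
 * Illustrative sketch (not part of the original file): the same hole-search
 * idea as find_free_dev_extent(), but over a plain array of allocated
 * extents sorted by offset instead of the dev extent btree.  The struct and
 * function names below are hypothetical.
 */
struct sketch_extent {
	u64 offset;
	u64 length;
};

/* returns 0 and fills *start on success, -1 if no hole is large enough */
static int sketch_find_hole(struct sketch_extent *ext, int nr, u64 dev_size,
			    u64 num_bytes, u64 *start)
{
	/* never hand out the first 1MB, it holds the superblock area */
	u64 last_byte = 1024 * 1024;
	int i;

	for (i = 0; i < nr; i++) {
		if (ext[i].offset > last_byte &&
		    ext[i].offset - last_byte >= num_bytes) {
			*start = last_byte;
			return 0;
		}
		if (ext[i].offset + ext[i].length > last_byte)
			last_byte = ext[i].offset + ext[i].length;
	}
	if (dev_size > last_byte && dev_size - last_byte >= num_bytes) {
		*start = last_byte;
		return 0;
	}
	return -1;
}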
427 int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
428 struct btrfs_device *device,
432 struct btrfs_path *path;
433 struct btrfs_root *root = device->dev_root;
434 struct btrfs_key key;
435 struct btrfs_key found_key;
436 struct extent_buffer *leaf = NULL;
437 struct btrfs_dev_extent *extent = NULL;
439 path = btrfs_alloc_path();
443 key.objectid = device->devid;
445 key.type = BTRFS_DEV_EXTENT_KEY;
447 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
449 ret = btrfs_previous_item(root, path, key.objectid,
450 BTRFS_DEV_EXTENT_KEY);
452 leaf = path->nodes[0];
453 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
454 extent = btrfs_item_ptr(leaf, path->slots[0],
455 struct btrfs_dev_extent);
456 BUG_ON(found_key.offset > start || found_key.offset +
457 btrfs_dev_extent_length(leaf, extent) < start);
459 } else if (ret == 0) {
460 leaf = path->nodes[0];
461 extent = btrfs_item_ptr(leaf, path->slots[0],
462 struct btrfs_dev_extent);
466 if (device->bytes_used > 0)
467 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
468 ret = btrfs_del_item(trans, root, path);
471 btrfs_free_path(path);
475 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
476 struct btrfs_device *device,
477 u64 chunk_tree, u64 chunk_objectid,
479 u64 num_bytes, u64 *start)
482 struct btrfs_path *path;
483 struct btrfs_root *root = device->dev_root;
484 struct btrfs_dev_extent *extent;
485 struct extent_buffer *leaf;
486 struct btrfs_key key;
488 WARN_ON(!device->in_fs_metadata);
489 path = btrfs_alloc_path();
493 ret = find_free_dev_extent(trans, device, path, num_bytes, start);
498 key.objectid = device->devid;
500 key.type = BTRFS_DEV_EXTENT_KEY;
501 ret = btrfs_insert_empty_item(trans, root, path, &key,
505 leaf = path->nodes[0];
506 extent = btrfs_item_ptr(leaf, path->slots[0],
507 struct btrfs_dev_extent);
508 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
509 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
510 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
512 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
513 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
516 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
517 btrfs_mark_buffer_dirty(leaf);
519 btrfs_free_path(path);
523 static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
525 struct btrfs_path *path;
527 struct btrfs_key key;
528 struct btrfs_chunk *chunk;
529 struct btrfs_key found_key;
531 path = btrfs_alloc_path();
534 key.objectid = objectid;
535 key.offset = (u64)-1;
536 key.type = BTRFS_CHUNK_ITEM_KEY;
538 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
544 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
548 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
550 if (found_key.objectid != objectid)
553 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
555 *offset = found_key.offset +
556 btrfs_chunk_length(path->nodes[0], chunk);
561 btrfs_free_path(path);
565 static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
569 struct btrfs_key key;
570 struct btrfs_key found_key;
572 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
573 key.type = BTRFS_DEV_ITEM_KEY;
574 key.offset = (u64)-1;
576 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
582 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
587 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
589 *objectid = found_key.offset + 1;
593 btrfs_release_path(root, path);
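/*
 * Illustrative sketch (not part of the original file): find_next_devid()
 * hands out the largest existing devid plus one, and find_next_chunk()
 * similarly hands out the end offset of the last chunk.  An array-based
 * analogue of the devid case, with hypothetical names:
 */
static u64 sketch_next_devid(const u64 *devids, int nr)
{
	u64 next = 1;	/* first devid handed out on an empty table */
	int i;

	for (i = 0; i < nr; i++) {
		if (devids[i] + 1 > next)
			next = devids[i] + 1;
	}
	return next;
}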
598 * the device information is stored in the chunk root
599 * the btrfs_device struct should be fully filled in
601 int btrfs_add_device(struct btrfs_trans_handle *trans,
602 struct btrfs_root *root,
603 struct btrfs_device *device)
606 struct btrfs_path *path;
607 struct btrfs_dev_item *dev_item;
608 struct extent_buffer *leaf;
609 struct btrfs_key key;
613 root = root->fs_info->chunk_root;
615 path = btrfs_alloc_path();
619 ret = find_next_devid(root, path, &free_devid);
623 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
624 key.type = BTRFS_DEV_ITEM_KEY;
625 key.offset = free_devid;
627 ret = btrfs_insert_empty_item(trans, root, path, &key,
632 leaf = path->nodes[0];
633 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
635 device->devid = free_devid;
636 btrfs_set_device_id(leaf, dev_item, device->devid);
637 btrfs_set_device_type(leaf, dev_item, device->type);
638 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
639 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
640 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
641 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
642 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
643 btrfs_set_device_group(leaf, dev_item, 0);
644 btrfs_set_device_seek_speed(leaf, dev_item, 0);
645 btrfs_set_device_bandwidth(leaf, dev_item, 0);
647 ptr = (unsigned long)btrfs_device_uuid(dev_item);
648 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
649 btrfs_mark_buffer_dirty(leaf);
653 btrfs_free_path(path);
657 static int btrfs_rm_dev_item(struct btrfs_root *root,
658 struct btrfs_device *device)
661 struct btrfs_path *path;
662 struct block_device *bdev = device->bdev;
663 struct btrfs_device *next_dev;
664 struct btrfs_key key;
666 struct btrfs_fs_devices *fs_devices;
667 struct btrfs_trans_handle *trans;
669 root = root->fs_info->chunk_root;
671 path = btrfs_alloc_path();
675 trans = btrfs_start_transaction(root, 1);
676 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
677 key.type = BTRFS_DEV_ITEM_KEY;
678 key.offset = device->devid;
680 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
689 ret = btrfs_del_item(trans, root, path);
694 * at this point, the device is zero sized. We want to
695 * remove it from the devices list and zero out the old super
697 list_del_init(&device->dev_list);
698 list_del_init(&device->dev_alloc_list);
699 fs_devices = root->fs_info->fs_devices;
701 next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
703 if (bdev == root->fs_info->sb->s_bdev)
704 root->fs_info->sb->s_bdev = next_dev->bdev;
705 if (bdev == fs_devices->latest_bdev)
706 fs_devices->latest_bdev = next_dev->bdev;
708 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
709 btrfs_set_super_total_bytes(&root->fs_info->super_copy,
710 total_bytes - device->total_bytes);
712 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
713 btrfs_set_super_num_devices(&root->fs_info->super_copy,
716 btrfs_free_path(path);
717 btrfs_commit_transaction(trans, root);
721 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
723 struct btrfs_device *device;
724 struct block_device *bdev;
725 struct buffer_head *bh = NULL;
726 struct btrfs_super_block *disk_super;
731 mutex_lock(&root->fs_info->fs_mutex);
732 mutex_lock(&uuid_mutex);
734 all_avail = root->fs_info->avail_data_alloc_bits |
735 root->fs_info->avail_system_alloc_bits |
736 root->fs_info->avail_metadata_alloc_bits;
738 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
739 btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
740 printk("btrfs: unable to go below four devices on raid10\n");
745 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
746 btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
747 printk("btrfs: unable to go below two devices on raid1\n");
752 if (strcmp(device_path, "missing") == 0) {
753 struct list_head *cur;
754 struct list_head *devices;
755 struct btrfs_device *tmp;
758 devices = &root->fs_info->fs_devices->devices;
759 list_for_each(cur, devices) {
760 tmp = list_entry(cur, struct btrfs_device, dev_list);
761 if (tmp->in_fs_metadata && !tmp->bdev) {
770 printk("btrfs: no missing devices found to remove\n");
775 bdev = open_bdev_excl(device_path, 0,
776 root->fs_info->bdev_holder);
782 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
787 disk_super = (struct btrfs_super_block *)bh->b_data;
788 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
789 sizeof(disk_super->magic))) {
793 if (memcmp(disk_super->fsid, root->fs_info->fsid,
798 devid = le64_to_cpu(disk_super->dev_item.devid);
799 device = btrfs_find_device(root, devid, NULL);
806 root->fs_info->fs_devices->num_devices--;
808 ret = btrfs_shrink_device(device, 0);
813 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
818 /* make sure this device isn't detected as part of
821 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
822 set_buffer_dirty(bh);
823 sync_dirty_buffer(bh);
829 /* one close for the device struct or super_block */
830 close_bdev_excl(device->bdev);
833 /* one close for us */
834 close_bdev_excl(bdev);
845 close_bdev_excl(bdev);
847 mutex_unlock(&uuid_mutex);
848 mutex_unlock(&root->fs_info->fs_mutex);
852 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
854 struct btrfs_trans_handle *trans;
855 struct btrfs_device *device;
856 struct block_device *bdev;
857 struct list_head *cur;
858 struct list_head *devices;
863 bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
867 mutex_lock(&root->fs_info->fs_mutex);
868 trans = btrfs_start_transaction(root, 1);
869 devices = &root->fs_info->fs_devices->devices;
870 list_for_each(cur, devices) {
871 device = list_entry(cur, struct btrfs_device, dev_list);
872 if (device->bdev == bdev) {
878 device = kzalloc(sizeof(*device), GFP_NOFS);
880 /* we can safely leave the fs_devices entry around */
885 device->barriers = 1;
886 generate_random_uuid(device->uuid);
887 spin_lock_init(&device->io_lock);
888 device->name = kstrdup(device_path, GFP_NOFS);
893 device->io_width = root->sectorsize;
894 device->io_align = root->sectorsize;
895 device->sector_size = root->sectorsize;
896 device->total_bytes = i_size_read(bdev->bd_inode);
897 device->dev_root = root->fs_info->dev_root;
899 device->in_fs_metadata = 1;
901 ret = btrfs_add_device(trans, root, device);
905 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
906 btrfs_set_super_total_bytes(&root->fs_info->super_copy,
907 total_bytes + device->total_bytes);
909 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
910 btrfs_set_super_num_devices(&root->fs_info->super_copy,
913 list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
914 list_add(&device->dev_alloc_list,
915 &root->fs_info->fs_devices->alloc_list);
916 root->fs_info->fs_devices->num_devices++;
918 btrfs_end_transaction(trans, root);
919 mutex_unlock(&root->fs_info->fs_mutex);
923 close_bdev_excl(bdev);
927 int btrfs_update_device(struct btrfs_trans_handle *trans,
928 struct btrfs_device *device)
931 struct btrfs_path *path;
932 struct btrfs_root *root;
933 struct btrfs_dev_item *dev_item;
934 struct extent_buffer *leaf;
935 struct btrfs_key key;
937 root = device->dev_root->fs_info->chunk_root;
939 path = btrfs_alloc_path();
943 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
944 key.type = BTRFS_DEV_ITEM_KEY;
945 key.offset = device->devid;
947 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
956 leaf = path->nodes[0];
957 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
959 btrfs_set_device_id(leaf, dev_item, device->devid);
960 btrfs_set_device_type(leaf, dev_item, device->type);
961 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
962 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
963 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
964 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
965 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
966 btrfs_mark_buffer_dirty(leaf);
969 btrfs_free_path(path);
973 int btrfs_grow_device(struct btrfs_trans_handle *trans,
974 struct btrfs_device *device, u64 new_size)
976 struct btrfs_super_block *super_copy =
977 &device->dev_root->fs_info->super_copy;
978 u64 old_total = btrfs_super_total_bytes(super_copy);
979 u64 diff = new_size - device->total_bytes;
981 btrfs_set_super_total_bytes(super_copy, old_total + diff);
982 return btrfs_update_device(trans, device);
985 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
986 struct btrfs_root *root,
987 u64 chunk_tree, u64 chunk_objectid,
991 struct btrfs_path *path;
992 struct btrfs_key key;
994 root = root->fs_info->chunk_root;
995 path = btrfs_alloc_path();
999 key.objectid = chunk_objectid;
1000 key.offset = chunk_offset;
1001 key.type = BTRFS_CHUNK_ITEM_KEY;
1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1006 ret = btrfs_del_item(trans, root, path);
1009 btrfs_free_path(path);
1013 int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1016 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1017 struct btrfs_disk_key *disk_key;
1018 struct btrfs_chunk *chunk;
1025 struct btrfs_key key;
1027 array_size = btrfs_super_sys_array_size(super_copy);
1029 ptr = super_copy->sys_chunk_array;
1032 while (cur < array_size) {
1033 disk_key = (struct btrfs_disk_key *)ptr;
1034 btrfs_disk_key_to_cpu(&key, disk_key);
1036 len = sizeof(*disk_key);
1038 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1039 chunk = (struct btrfs_chunk *)(ptr + len);
1040 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1041 len += btrfs_chunk_item_size(num_stripes);
1046 if (key.objectid == chunk_objectid &&
1047 key.offset == chunk_offset) {
1048 memmove(ptr, ptr + len, array_size - (cur + len));
1050 btrfs_set_super_sys_array_size(super_copy, array_size);
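/*
 * Illustrative sketch (not part of the original file): btrfs_del_sys_chunk()
 * deletes a record from the packed sys_chunk_array by sliding the tail of
 * the buffer down over it, then shrinking the stored array size by the
 * record length.  A minimal stand-alone version of that memmove dance, with
 * hypothetical names:
 */
static u32 sketch_del_record(u8 *array, u32 array_size, u32 rec_start,
			     u32 rec_len)
{
	/* bytes that live after the record being removed */
	u32 tail = array_size - (rec_start + rec_len);

	memmove(array + rec_start, array + rec_start + rec_len, tail);
	return array_size - rec_len;	/* caller stores the new array size */
}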
1060 int btrfs_relocate_chunk(struct btrfs_root *root,
1061 u64 chunk_tree, u64 chunk_objectid,
1064 struct extent_map_tree *em_tree;
1065 struct btrfs_root *extent_root;
1066 struct btrfs_trans_handle *trans;
1067 struct extent_map *em;
1068 struct map_lookup *map;
1072 printk("btrfs relocating chunk %llu\n",
1073 (unsigned long long)chunk_offset);
1074 root = root->fs_info->chunk_root;
1075 extent_root = root->fs_info->extent_root;
1076 em_tree = &root->fs_info->mapping_tree.map_tree;
1078 /* step one, relocate all the extents inside this chunk */
1079 ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
1082 trans = btrfs_start_transaction(root, 1);
1086 * step two, delete the device extents and the
1087 * chunk tree entries
1089 spin_lock(&em_tree->lock);
1090 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1091 spin_unlock(&em_tree->lock);
1093 BUG_ON(em->start > chunk_offset ||
1094 em->start + em->len < chunk_offset);
1095 map = (struct map_lookup *)em->bdev;
1097 for (i = 0; i < map->num_stripes; i++) {
1098 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1099 map->stripes[i].physical);
1102 if (map->stripes[i].dev) {
1103 ret = btrfs_update_device(trans, map->stripes[i].dev);
1107 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1112 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1113 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1117 spin_lock(&em_tree->lock);
1118 remove_extent_mapping(em_tree, em);
1122 /* once for the tree */
1123 free_extent_map(em);
1124 spin_unlock(&em_tree->lock);
1127 free_extent_map(em);
1129 btrfs_end_transaction(trans, root);
1133 static u64 div_factor(u64 num, int factor)
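/*
 * Illustrative sketch (not part of the original file): the body of
 * div_factor() is elided above, but from its callers (div_factor(x, 1) is
 * used where "10% of x" is wanted) it scales by tenths.  A hypothetical
 * stand-alone equivalent (the real code would use do_div() for 64 bit math
 * on 32 bit hosts):
 */
static u64 sketch_div_factor(u64 num, int factor)
{
	/* factor is in tenths: 1 -> 10%, 5 -> 50%, 10 -> 100% */
	return num * factor / 10;
}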
1143 int btrfs_balance(struct btrfs_root *dev_root)
1146 struct list_head *cur;
1147 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1148 struct btrfs_device *device;
1151 struct btrfs_path *path;
1152 struct btrfs_key key;
1153 struct btrfs_chunk *chunk;
1154 struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1155 struct btrfs_trans_handle *trans;
1156 struct btrfs_key found_key;
1159 dev_root = dev_root->fs_info->dev_root;
1161 mutex_lock(&dev_root->fs_info->fs_mutex);
1162 /* step one make some room on all the devices */
1163 list_for_each(cur, devices) {
1164 device = list_entry(cur, struct btrfs_device, dev_list);
1165 old_size = device->total_bytes;
1166 size_to_free = div_factor(old_size, 1);
1167 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1168 if (device->total_bytes - device->bytes_used > size_to_free)
1171 ret = btrfs_shrink_device(device, old_size - size_to_free);
1174 trans = btrfs_start_transaction(dev_root, 1);
1177 ret = btrfs_grow_device(trans, device, old_size);
1180 btrfs_end_transaction(trans, dev_root);
1183 /* step two, relocate all the chunks */
1184 path = btrfs_alloc_path();
1187 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1188 key.offset = (u64)-1;
1189 key.type = BTRFS_CHUNK_ITEM_KEY;
1192 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1197 * this shouldn't happen, it means the last relocate
1203 ret = btrfs_previous_item(chunk_root, path, 0,
1204 BTRFS_CHUNK_ITEM_KEY);
1208 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1210 if (found_key.objectid != key.objectid)
1212 chunk = btrfs_item_ptr(path->nodes[0],
1214 struct btrfs_chunk);
1215 key.offset = found_key.offset;
1216 /* chunk zero is special */
1217 if (key.offset == 0)
1220 ret = btrfs_relocate_chunk(chunk_root,
1221 chunk_root->root_key.objectid,
1225 btrfs_release_path(chunk_root, path);
1229 btrfs_free_path(path);
1230 mutex_unlock(&dev_root->fs_info->fs_mutex);
1235 * shrinking a device means finding all of the device extents past
1236 * the new size, and then following the back refs to the chunks.
1237 * The chunk relocation code actually frees the device extent
1239 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1241 struct btrfs_trans_handle *trans;
1242 struct btrfs_root *root = device->dev_root;
1243 struct btrfs_dev_extent *dev_extent = NULL;
1244 struct btrfs_path *path;
1251 struct extent_buffer *l;
1252 struct btrfs_key key;
1253 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1254 u64 old_total = btrfs_super_total_bytes(super_copy);
1255 u64 diff = device->total_bytes - new_size;
1258 path = btrfs_alloc_path();
1262 trans = btrfs_start_transaction(root, 1);
1270 device->total_bytes = new_size;
1271 ret = btrfs_update_device(trans, device);
1273 btrfs_end_transaction(trans, root);
1276 WARN_ON(diff > old_total);
1277 btrfs_set_super_total_bytes(super_copy, old_total - diff);
1278 btrfs_end_transaction(trans, root);
1280 key.objectid = device->devid;
1281 key.offset = (u64)-1;
1282 key.type = BTRFS_DEV_EXTENT_KEY;
1285 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1289 ret = btrfs_previous_item(root, path, 0, key.type);
1298 slot = path->slots[0];
1299 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1301 if (key.objectid != device->devid)
1304 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1305 length = btrfs_dev_extent_length(l, dev_extent);
1307 if (key.offset + length <= new_size)
1310 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1311 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1312 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1313 btrfs_release_path(root, path);
1315 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1322 btrfs_free_path(path);
1326 int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1327 struct btrfs_root *root,
1328 struct btrfs_key *key,
1329 struct btrfs_chunk *chunk, int item_size)
1331 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1332 struct btrfs_disk_key disk_key;
1336 array_size = btrfs_super_sys_array_size(super_copy);
1337 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1340 ptr = super_copy->sys_chunk_array + array_size;
1341 btrfs_cpu_key_to_disk(&disk_key, key);
1342 memcpy(ptr, &disk_key, sizeof(disk_key));
1343 ptr += sizeof(disk_key);
1344 memcpy(ptr, chunk, item_size);
1345 item_size += sizeof(disk_key);
1346 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1350 static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
1353 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1355 else if (type & BTRFS_BLOCK_GROUP_RAID10)
1356 return calc_size * (num_stripes / sub_stripes);
1358 return calc_size * num_stripes;
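/*
 * Illustrative example (not part of the original file): with
 * calc_size = 1GB per stripe, chunk_bytes_by_type() reports the logical
 * bytes a chunk provides (the elided RAID1/DUP branch returns calc_size,
 * since every stripe there holds a full copy):
 *
 *   RAID1 or DUP, any num_stripes             -> 1GB
 *   RAID10, num_stripes = 4, sub_stripes = 2  -> 2GB
 *   RAID0 or single, num_stripes = 4          -> 4GB
 */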
1362 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1363 struct btrfs_root *extent_root, u64 *start,
1364 u64 *num_bytes, u64 type)
1367 struct btrfs_fs_info *info = extent_root->fs_info;
1368 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
1369 struct btrfs_path *path;
1370 struct btrfs_stripe *stripes;
1371 struct btrfs_device *device = NULL;
1372 struct btrfs_chunk *chunk;
1373 struct list_head private_devs;
1374 struct list_head *dev_list;
1375 struct list_head *cur;
1376 struct extent_map_tree *em_tree;
1377 struct map_lookup *map;
1378 struct extent_map *em;
1379 int min_stripe_size = 1 * 1024 * 1024;
1381 u64 calc_size = 1024 * 1024 * 1024;
1382 u64 max_chunk_size = calc_size;
1387 int num_stripes = 1;
1388 int min_stripes = 1;
1389 int sub_stripes = 0;
1393 int stripe_len = 64 * 1024;
1394 struct btrfs_key key;
1396 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1397 (type & BTRFS_BLOCK_GROUP_DUP)) {
1399 type &= ~BTRFS_BLOCK_GROUP_DUP;
1401 dev_list = &extent_root->fs_info->fs_devices->alloc_list;
1402 if (list_empty(dev_list))
1405 if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1406 num_stripes = btrfs_super_num_devices(&info->super_copy);
1409 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1413 if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1414 num_stripes = min_t(u64, 2,
1415 btrfs_super_num_devices(&info->super_copy));
1416 if (num_stripes < 2)
1420 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1421 num_stripes = btrfs_super_num_devices(&info->super_copy);
1422 if (num_stripes < 4)
1424 num_stripes &= ~(u32)1;
1429 if (type & BTRFS_BLOCK_GROUP_DATA) {
1430 max_chunk_size = 10 * calc_size;
1431 min_stripe_size = 64 * 1024 * 1024;
1432 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1433 max_chunk_size = 4 * calc_size;
1434 min_stripe_size = 32 * 1024 * 1024;
1435 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1436 calc_size = 8 * 1024 * 1024;
1437 max_chunk_size = calc_size * 2;
1438 min_stripe_size = 1 * 1024 * 1024;
1441 path = btrfs_alloc_path();
1445 /* we don't want a chunk larger than 10% of the FS */
1446 percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
1447 max_chunk_size = min(percent_max, max_chunk_size);
1450 if (calc_size * num_stripes > max_chunk_size) {
1451 calc_size = max_chunk_size;
1452 do_div(calc_size, num_stripes);
1453 do_div(calc_size, stripe_len);
1454 calc_size *= stripe_len;
1456 /* we don't want tiny stripes */
1457 calc_size = max_t(u64, min_stripe_size, calc_size);
1459 do_div(calc_size, stripe_len);
1460 calc_size *= stripe_len;
1462 INIT_LIST_HEAD(&private_devs);
1463 cur = dev_list->next;
1466 if (type & BTRFS_BLOCK_GROUP_DUP)
1467 min_free = calc_size * 2;
1469 min_free = calc_size;
1471 /* we add 1MB because we never use the first 1MB of the device */
1472 min_free += 1024 * 1024;
1474 /* build a private list of devices we will allocate from */
1475 while(index < num_stripes) {
1476 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1478 if (device->total_bytes > device->bytes_used)
1479 avail = device->total_bytes - device->bytes_used;
1484 if (device->in_fs_metadata && avail >= min_free) {
1485 u64 ignored_start = 0;
1486 ret = find_free_dev_extent(trans, device, path,
1490 list_move_tail(&device->dev_alloc_list,
1493 if (type & BTRFS_BLOCK_GROUP_DUP)
1496 } else if (device->in_fs_metadata && avail > max_avail)
1498 if (cur == dev_list)
1501 if (index < num_stripes) {
1502 list_splice(&private_devs, dev_list);
1503 if (index >= min_stripes) {
1504 num_stripes = index;
1505 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1506 num_stripes /= sub_stripes;
1507 num_stripes *= sub_stripes;
1512 if (!looped && max_avail > 0) {
1514 calc_size = max_avail;
1517 btrfs_free_path(path);
1520 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1521 key.type = BTRFS_CHUNK_ITEM_KEY;
1522 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1525 btrfs_free_path(path);
1529 chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1531 btrfs_free_path(path);
1535 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1538 btrfs_free_path(path);
1541 btrfs_free_path(path);
1544 stripes = &chunk->stripe;
1545 *num_bytes = chunk_bytes_by_type(type, calc_size,
1546 num_stripes, sub_stripes);
1549 while(index < num_stripes) {
1550 struct btrfs_stripe *stripe;
1551 BUG_ON(list_empty(&private_devs));
1552 cur = private_devs.next;
1553 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1555 /* loop over this device again if we're doing a dup group */
1556 if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
1557 (index == num_stripes - 1))
1558 list_move_tail(&device->dev_alloc_list, dev_list);
1560 ret = btrfs_alloc_dev_extent(trans, device,
1561 info->chunk_root->root_key.objectid,
1562 BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
1563 calc_size, &dev_offset);
1565 device->bytes_used += calc_size;
1566 ret = btrfs_update_device(trans, device);
1569 map->stripes[index].dev = device;
1570 map->stripes[index].physical = dev_offset;
1571 stripe = stripes + index;
1572 btrfs_set_stack_stripe_devid(stripe, device->devid);
1573 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1574 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1575 physical = dev_offset;
1578 BUG_ON(!list_empty(&private_devs));
1580 /* key was set above */
1581 btrfs_set_stack_chunk_length(chunk, *num_bytes);
1582 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1583 btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1584 btrfs_set_stack_chunk_type(chunk, type);
1585 btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1586 btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1587 btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1588 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
1589 btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1590 map->sector_size = extent_root->sectorsize;
1591 map->stripe_len = stripe_len;
1592 map->io_align = stripe_len;
1593 map->io_width = stripe_len;
1595 map->num_stripes = num_stripes;
1596 map->sub_stripes = sub_stripes;
1598 ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1599 btrfs_chunk_item_size(num_stripes));
1601 *start = key.offset;
1603 em = alloc_extent_map(GFP_NOFS);
1606 em->bdev = (struct block_device *)map;
1607 em->start = key.offset;
1608 em->len = *num_bytes;
1609 em->block_start = 0;
1611 if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1612 ret = btrfs_add_system_chunk(trans, chunk_root, &key,
1613 chunk, btrfs_chunk_item_size(num_stripes));
1618 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
1619 spin_lock(&em_tree->lock);
1620 ret = add_extent_mapping(em_tree, em);
1621 spin_unlock(&em_tree->lock);
1623 free_extent_map(em);
1627 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
1629 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
1632 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
1634 struct extent_map *em;
1637 spin_lock(&tree->map_tree.lock);
1638 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
1640 remove_extent_mapping(&tree->map_tree, em);
1641 spin_unlock(&tree->map_tree.lock);
1646 free_extent_map(em);
1647 /* once for the tree */
1648 free_extent_map(em);
1652 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
1654 struct extent_map *em;
1655 struct map_lookup *map;
1656 struct extent_map_tree *em_tree = &map_tree->map_tree;
1659 spin_lock(&em_tree->lock);
1660 em = lookup_extent_mapping(em_tree, logical, len);
1661 spin_unlock(&em_tree->lock);
1664 BUG_ON(em->start > logical || em->start + em->len < logical);
1665 map = (struct map_lookup *)em->bdev;
1666 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
1667 ret = map->num_stripes;
1668 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1669 ret = map->sub_stripes;
1672 free_extent_map(em);
1676 static int find_live_mirror(struct map_lookup *map, int first, int num,
1680 if (map->stripes[optimal].dev->bdev)
1682 for (i = first; i < first + num; i++) {
1683 if (map->stripes[i].dev->bdev)
1686 /* we couldn't find one that doesn't fail. Just return something
1687 * and the io error handling code will clean up eventually
1692 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1693 u64 logical, u64 *length,
1694 struct btrfs_multi_bio **multi_ret,
1695 int mirror_num, struct page *unplug_page)
1697 struct extent_map *em;
1698 struct map_lookup *map;
1699 struct extent_map_tree *em_tree = &map_tree->map_tree;
1703 int stripes_allocated = 8;
1704 int stripes_required = 1;
1709 struct btrfs_multi_bio *multi = NULL;
1711 if (multi_ret && !(rw & (1 << BIO_RW))) {
1712 stripes_allocated = 1;
1716 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1721 atomic_set(&multi->error, 0);
1724 spin_lock(&em_tree->lock);
1725 em = lookup_extent_mapping(em_tree, logical, *length);
1726 spin_unlock(&em_tree->lock);
1728 if (!em && unplug_page)
1732 printk("unable to find logical %Lu len %Lu\n", logical, *length);
1736 BUG_ON(em->start > logical || em->start + em->len < logical);
1737 map = (struct map_lookup *)em->bdev;
1738 offset = logical - em->start;
1740 if (mirror_num > map->num_stripes)
1743 /* if our multi bio struct is too small, back off and try again */
1744 if (rw & (1 << BIO_RW)) {
1745 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1746 BTRFS_BLOCK_GROUP_DUP)) {
1747 stripes_required = map->num_stripes;
1749 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1750 stripes_required = map->sub_stripes;
1754 if (multi_ret && rw == WRITE &&
1755 stripes_allocated < stripes_required) {
1756 stripes_allocated = map->num_stripes;
1757 free_extent_map(em);
1763 * stripe_nr counts the total number of stripes we have to stride
1764 * to get to this block
1766 do_div(stripe_nr, map->stripe_len);
1768 stripe_offset = stripe_nr * map->stripe_len;
1769 BUG_ON(offset < stripe_offset);
1771 /* stripe_offset is the offset of this block in its stripe*/
1772 stripe_offset = offset - stripe_offset;
1774 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1775 BTRFS_BLOCK_GROUP_RAID10 |
1776 BTRFS_BLOCK_GROUP_DUP)) {
1777 /* we limit the length of each bio to what fits in a stripe */
1778 *length = min_t(u64, em->len - offset,
1779 map->stripe_len - stripe_offset);
1781 *length = em->len - offset;
1784 if (!multi_ret && !unplug_page)
1789 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1790 if (unplug_page || (rw & (1 << BIO_RW)))
1791 num_stripes = map->num_stripes;
1792 else if (mirror_num)
1793 stripe_index = mirror_num - 1;
1795 stripe_index = find_live_mirror(map, 0,
1797 current->pid % map->num_stripes);
1800 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1801 if (rw & (1 << BIO_RW))
1802 num_stripes = map->num_stripes;
1803 else if (mirror_num)
1804 stripe_index = mirror_num - 1;
1806 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1807 int factor = map->num_stripes / map->sub_stripes;
1809 stripe_index = do_div(stripe_nr, factor);
1810 stripe_index *= map->sub_stripes;
1812 if (unplug_page || (rw & (1 << BIO_RW)))
1813 num_stripes = map->sub_stripes;
1814 else if (mirror_num)
1815 stripe_index += mirror_num - 1;
1817 stripe_index = find_live_mirror(map, stripe_index,
1818 map->sub_stripes, stripe_index +
1819 current->pid % map->sub_stripes);
1823 * after this do_div call, stripe_nr is the number of stripes
1824 * on this device we have to walk to find the data, and
1825 * stripe_index is the number of our device in the stripe array
1827 stripe_index = do_div(stripe_nr, map->num_stripes);
1829 BUG_ON(stripe_index >= map->num_stripes);
1831 for (i = 0; i < num_stripes; i++) {
1833 struct btrfs_device *device;
1834 struct backing_dev_info *bdi;
1836 device = map->stripes[stripe_index].dev;
1838 bdi = blk_get_backing_dev_info(device->bdev);
1839 if (bdi->unplug_io_fn) {
1840 bdi->unplug_io_fn(bdi, unplug_page);
1844 multi->stripes[i].physical =
1845 map->stripes[stripe_index].physical +
1846 stripe_offset + stripe_nr * map->stripe_len;
1847 multi->stripes[i].dev = map->stripes[stripe_index].dev;
1853 multi->num_stripes = num_stripes;
1854 multi->max_errors = max_errors;
1857 free_extent_map(em);
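/*
 * Illustrative sketch (not part of the original file): the core RAID0-style
 * address math done by __btrfs_map_block(), written out for a plain striped
 * chunk.  Names are hypothetical; the real code uses do_div() because these
 * are 64 bit values on 32 bit hosts.
 */
struct sketch_mapping {
	int stripe_index;	/* which device in the stripe array */
	u64 physical;		/* byte offset into that device's extent */
};

static struct sketch_mapping sketch_map_raid0(u64 offset, u64 stripe_len,
					      int num_stripes,
					      const u64 *stripe_physical)
{
	struct sketch_mapping m;
	u64 stripe_nr = offset / stripe_len;	/* stripes from chunk start */
	u64 stripe_offset = offset - stripe_nr * stripe_len;

	m.stripe_index = stripe_nr % num_stripes;	/* device to use */
	/* full stripes already on this device, plus the offset inside one */
	m.physical = stripe_physical[m.stripe_index] +
		     (stripe_nr / num_stripes) * stripe_len + stripe_offset;
	return m;
}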
1861 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1862 u64 logical, u64 *length,
1863 struct btrfs_multi_bio **multi_ret, int mirror_num)
1865 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
1869 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
1870 u64 logical, struct page *page)
1872 u64 length = PAGE_CACHE_SIZE;
1873 return __btrfs_map_block(map_tree, READ, logical, &length,
1878 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1879 static void end_bio_multi_stripe(struct bio *bio, int err)
1881 static int end_bio_multi_stripe(struct bio *bio,
1882 unsigned int bytes_done, int err)
1885 struct btrfs_multi_bio *multi = bio->bi_private;
1887 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1892 atomic_inc(&multi->error);
1894 if (atomic_dec_and_test(&multi->stripes_pending)) {
1895 bio->bi_private = multi->private;
1896 bio->bi_end_io = multi->end_io;
1897 /* only send an error to the higher layers if it is
1898 * beyond the tolerance of the multi-bio
1900 if (atomic_read(&multi->error) > multi->max_errors) {
1904 * this bio is actually up to date, we didn't
1905 * go over the max number of errors
1907 set_bit(BIO_UPTODATE, &bio->bi_flags);
1912 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1913 bio_endio(bio, bio->bi_size, err);
1915 bio_endio(bio, err);
1920 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
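/*
 * Illustrative sketch (not part of the original file): the error-tolerance
 * bookkeeping used by end_bio_multi_stripe(), reduced to its counters.  One
 * completion runs per stripe; only when the last one finishes do we decide
 * whether the whole multi-bio failed.  Names are hypothetical.
 */
struct sketch_multi {
	atomic_t stripes_pending;	/* set to num_stripes before submit */
	atomic_t error;			/* stripes that reported an error */
	int max_errors;			/* e.g. 1 for mirrored writes, else 0 */
};

/* returns 1 if this was the last completion and the overall result failed */
static int sketch_stripe_done(struct sketch_multi *multi, int err)
{
	if (err)
		atomic_inc(&multi->error);
	if (atomic_dec_and_test(&multi->stripes_pending))
		return atomic_read(&multi->error) > multi->max_errors;
	return 0;
}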
1925 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
1928 struct btrfs_mapping_tree *map_tree;
1929 struct btrfs_device *dev;
1930 struct bio *first_bio = bio;
1931 u64 logical = bio->bi_sector << 9;
1934 struct btrfs_multi_bio *multi = NULL;
1939 length = bio->bi_size;
1940 map_tree = &root->fs_info->mapping_tree;
1941 map_length = length;
1943 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
1947 total_devs = multi->num_stripes;
1948 if (map_length < length) {
1949 printk("mapping failed logical %Lu bio len %Lu "
1950 "len %Lu\n", logical, length, map_length);
1953 multi->end_io = first_bio->bi_end_io;
1954 multi->private = first_bio->bi_private;
1955 atomic_set(&multi->stripes_pending, multi->num_stripes);
1957 while(dev_nr < total_devs) {
1958 if (total_devs > 1) {
1959 if (dev_nr < total_devs - 1) {
1960 bio = bio_clone(first_bio, GFP_NOFS);
1965 bio->bi_private = multi;
1966 bio->bi_end_io = end_bio_multi_stripe;
1968 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
1969 dev = multi->stripes[dev_nr].dev;
1970 if (dev && dev->bdev) {
1971 bio->bi_bdev = dev->bdev;
1972 spin_lock(&dev->io_lock);
1974 spin_unlock(&dev->io_lock);
1975 submit_bio(rw, bio);
1977 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
1978 bio->bi_sector = logical >> 9;
1979 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1980 bio_endio(bio, bio->bi_size, -EIO);
1982 bio_endio(bio, -EIO);
1987 if (total_devs == 1)
1992 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
1995 struct list_head *head = &root->fs_info->fs_devices->devices;
1997 return __find_device(head, devid, uuid);
2000 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2001 u64 devid, u8 *dev_uuid)
2003 struct btrfs_device *device;
2004 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2006 device = kzalloc(sizeof(*device), GFP_NOFS);
2007 list_add(&device->dev_list,
2008 &fs_devices->devices);
2009 list_add(&device->dev_alloc_list,
2010 &fs_devices->alloc_list);
2011 device->barriers = 1;
2012 device->dev_root = root->fs_info->dev_root;
2013 device->devid = devid;
2014 fs_devices->num_devices++;
2015 spin_lock_init(&device->io_lock);
2016 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2021 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2022 struct extent_buffer *leaf,
2023 struct btrfs_chunk *chunk)
2025 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2026 struct map_lookup *map;
2027 struct extent_map *em;
2031 u8 uuid[BTRFS_UUID_SIZE];
2036 logical = key->offset;
2037 length = btrfs_chunk_length(leaf, chunk);
2039 spin_lock(&map_tree->map_tree.lock);
2040 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2041 spin_unlock(&map_tree->map_tree.lock);
2043 /* already mapped? */
2044 if (em && em->start <= logical && em->start + em->len > logical) {
2045 free_extent_map(em);
2048 free_extent_map(em);
2051 map = kzalloc(sizeof(*map), GFP_NOFS);
2055 em = alloc_extent_map(GFP_NOFS);
2058 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2059 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2061 free_extent_map(em);
2065 em->bdev = (struct block_device *)map;
2066 em->start = logical;
2068 em->block_start = 0;
2070 map->num_stripes = num_stripes;
2071 map->io_width = btrfs_chunk_io_width(leaf, chunk);
2072 map->io_align = btrfs_chunk_io_align(leaf, chunk);
2073 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2074 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2075 map->type = btrfs_chunk_type(leaf, chunk);
2076 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2077 for (i = 0; i < num_stripes; i++) {
2078 map->stripes[i].physical =
2079 btrfs_stripe_offset_nr(leaf, chunk, i);
2080 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2081 read_extent_buffer(leaf, uuid, (unsigned long)
2082 btrfs_stripe_dev_uuid_nr(chunk, i),
2084 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
2086 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2088 free_extent_map(em);
2091 if (!map->stripes[i].dev) {
2092 map->stripes[i].dev =
2093 add_missing_dev(root, devid, uuid);
2094 if (!map->stripes[i].dev) {
2096 free_extent_map(em);
2100 map->stripes[i].dev->in_fs_metadata = 1;
2103 spin_lock(&map_tree->map_tree.lock);
2104 ret = add_extent_mapping(&map_tree->map_tree, em);
2105 spin_unlock(&map_tree->map_tree.lock);
2107 free_extent_map(em);
2112 static int fill_device_from_item(struct extent_buffer *leaf,
2113 struct btrfs_dev_item *dev_item,
2114 struct btrfs_device *device)
2118 device->devid = btrfs_device_id(leaf, dev_item);
2119 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2120 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2121 device->type = btrfs_device_type(leaf, dev_item);
2122 device->io_align = btrfs_device_io_align(leaf, dev_item);
2123 device->io_width = btrfs_device_io_width(leaf, dev_item);
2124 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2126 ptr = (unsigned long)btrfs_device_uuid(dev_item);
2127 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2132 static int read_one_dev(struct btrfs_root *root,
2133 struct extent_buffer *leaf,
2134 struct btrfs_dev_item *dev_item)
2136 struct btrfs_device *device;
2139 u8 dev_uuid[BTRFS_UUID_SIZE];
2141 devid = btrfs_device_id(leaf, dev_item);
2142 read_extent_buffer(leaf, dev_uuid,
2143 (unsigned long)btrfs_device_uuid(dev_item),
2145 device = btrfs_find_device(root, devid, dev_uuid);
2147 printk("warning devid %Lu missing\n", devid);
2148 device = add_missing_dev(root, devid, dev_uuid);
2153 fill_device_from_item(leaf, dev_item, device);
2154 device->dev_root = root->fs_info->dev_root;
2155 device->in_fs_metadata = 1;
2158 ret = btrfs_open_device(device);
2166 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
2168 struct btrfs_dev_item *dev_item;
2170 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
2172 return read_one_dev(root, buf, dev_item);
2175 int btrfs_read_sys_array(struct btrfs_root *root)
2177 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2178 struct extent_buffer *sb;
2179 struct btrfs_disk_key *disk_key;
2180 struct btrfs_chunk *chunk;
2182 unsigned long sb_ptr;
2188 struct btrfs_key key;
2190 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
2191 BTRFS_SUPER_INFO_SIZE);
2194 btrfs_set_buffer_uptodate(sb);
2195 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
2196 array_size = btrfs_super_sys_array_size(super_copy);
2198 ptr = super_copy->sys_chunk_array;
2199 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
2202 while (cur < array_size) {
2203 disk_key = (struct btrfs_disk_key *)ptr;
2204 btrfs_disk_key_to_cpu(&key, disk_key);
2206 len = sizeof(*disk_key); ptr += len;
2210 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2211 chunk = (struct btrfs_chunk *)sb_ptr;
2212 ret = read_one_chunk(root, &key, sb, chunk);
2215 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2216 len = btrfs_chunk_item_size(num_stripes);
2225 free_extent_buffer(sb);
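/*
 * Illustrative sketch (not part of the original file): the sys_chunk_array
 * is a packed sequence of (btrfs_disk_key, btrfs_chunk) records, and
 * btrfs_read_sys_array() walks it much like the hypothetical helper below:
 * read a key, read the chunk header to learn num_stripes, then advance by
 * btrfs_chunk_item_size().  This sketch assumes every record is a chunk
 * item, which is all the array holds (the real code returns -EIO otherwise).
 */
static int sketch_walk_sys_array(u8 *array, u32 array_size)
{
	u32 cur = 0;
	int records = 0;

	while (cur < array_size) {
		struct btrfs_disk_key *dkey;
		struct btrfs_chunk *chunk;
		u32 num_stripes;

		dkey = (struct btrfs_disk_key *)(array + cur);
		cur += sizeof(*dkey);
		chunk = (struct btrfs_chunk *)(array + cur);
		num_stripes = btrfs_stack_chunk_num_stripes(chunk);
		cur += btrfs_chunk_item_size(num_stripes);
		records++;
	}
	return records;
}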
2229 int btrfs_read_chunk_tree(struct btrfs_root *root)
2231 struct btrfs_path *path;
2232 struct extent_buffer *leaf;
2233 struct btrfs_key key;
2234 struct btrfs_key found_key;
2238 root = root->fs_info->chunk_root;
2240 path = btrfs_alloc_path();
2244 /* first we search for all of the device items, and then we
2245 * read in all of the chunk items. This way we can create chunk
2246 * mappings that reference all of the devices that are found
2247 */
2248 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2252 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2254 leaf = path->nodes[0];
2255 slot = path->slots[0];
2256 if (slot >= btrfs_header_nritems(leaf)) {
2257 ret = btrfs_next_leaf(root, path);
2264 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2265 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2266 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
2268 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2269 struct btrfs_dev_item *dev_item;
2270 dev_item = btrfs_item_ptr(leaf, slot,
2271 struct btrfs_dev_item);
2272 ret = read_one_dev(root, leaf, dev_item);
2275 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2276 struct btrfs_chunk *chunk;
2277 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2278 ret = read_one_chunk(root, &found_key, leaf, chunk);
2282 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2284 btrfs_release_path(root, path);
2288 btrfs_free_path(path);