/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
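
/*
 * walk the global fs_uuids list and tear down every btrfs_device we
 * ever scanned, closing any block device that is still open
 */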
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			printk("uuid cleanup finds %s\n", dev->name);
			if (dev->bdev)
				close_bdev_excl(dev->bdev);
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))
			return dev;
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
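
/*
 * a device was noticed during scanning: add it to the in-memory list
 * for its filesystem, creating the btrfs_fs_devices entry if this is
 * the first device seen for that fsid, and track the latest generation
 * and lowest devid as we go
 */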
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
		printk("lowest devid now %Lu\n", devid);
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			printk("close devices closes %s\n", device->name);
		}
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}
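
/*
 * open every device in the list exclusively; on any failure, roll back
 * by closing whatever was already opened.  latest_bdev and lowest_bdev
 * are remembered so mount can find the most recent superblock
 */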
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = bdev;
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}
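
/*
 * read the superblock from one device; if it contains the btrfs magic,
 * register the device for its fsid via device_list_add
 */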
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	printk("scan one opens %s\n", path);
	bdev = open_bdev_excl(path, flags, holder);
	if (IS_ERR(bdev)) {
		printk("open failed\n");
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("no btrfs found on %s\n", path);
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	printk("found device %Lu transid %Lu on %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
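
/*
 * find a free area on the device and record it in the device tree as a
 * dev extent owned by the given chunk
 */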
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
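
/*
 * chunks are keyed by their starting logical offset; find the end of
 * the last chunk so the next one can be placed after it
 */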
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
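
/*
 * duplicate the chunk item into the superblock's sys_chunk_array so
 * the chunks needed to read the chunk tree itself can be found at
 * mount time
 */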
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
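
/*
 * allocate a new chunk: pick num_stripes devices with enough free
 * space for the raid type, carve a dev extent out of each, insert the
 * chunk item, and mirror the mapping into the in-memory extent map tree
 */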
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 min_free = calc_size;
	u64 avail;
	u64 max_avail = 0;
	int num_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0))
		num_stripes = btrfs_super_num_devices(&info->super_copy);
	if (type & (BTRFS_BLOCK_GROUP_DUP))
		num_stripes = 2;
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		/* raid10 needs an even number of stripes */
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
	}
again:
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail > max_avail)
			max_avail = avail;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		}
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;

	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		*num_bytes = calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		*num_bytes = calc_size * (num_stripes / sub_stripes);
	else
		*num_bytes = calc_size * num_stripes;

	index = 0;
	printk("new chunk type %Lu start %Lu size %Lu\n",
	       type, key.offset, *num_bytes);
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n",
		       key.offset, calc_size, device->devid, type);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
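
/*
 * return the number of copies that exist for the block at this logical
 * offset, based on the raid type of the containing chunk
 */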
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
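
/*
 * translate a logical byte range into one or more physical stripes:
 * writes get every mirror, reads get a single copy (the requested
 * mirror_num, or the least busy device for raid1)
 */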
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);
	if (!em) {
		printk("unable to find logical %Lu\n", logical);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}
	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (1 << BIO_RW))
			multi->num_stripes = map->num_stripes;
		else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			u64 least = (u64)-1;
			struct btrfs_device *cur;

			/* pick the least busy mirror for this read */
			for (i = 0; i < map->num_stripes; i++) {
				cur = map->stripes[i].dev;
				spin_lock(&cur->io_lock);
				if (cur->total_ios < least) {
					least = cur->total_ios;
					stripe_index = i;
				}
				spin_unlock(&cur->io_lock);
			}
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		int orig_stripe_nr = stripe_nr;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (rw & (1 << BIO_RW))
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else
			stripe_index += orig_stripe_nr % map->sub_stripes;
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;
out:
	free_extent_map(em);
	return 0;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		multi->error = err;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		if (!err && multi->error)
			err = multi->error;
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
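
/*
 * map a bio to the devices it needs to hit: single-stripe bios are
 * submitted directly, multi-stripe bios are cloned once per stripe and
 * completed through end_bio_multi_stripe above
 */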
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct bio_vec *bvec;
	struct btrfs_multi_bio *multi = NULL;
	int i;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	bio_for_each_segment(bvec, bio, i) {
		length += bvec->bv_len;
	}

	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while(dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		dev->total_ios++;
		spin_unlock(&dev->io_lock);
		submit_bio(rw, bio);
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}
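
/*
 * build the in-memory map_lookup for one chunk item and insert it into
 * the extent map tree, resolving each stripe's devid + uuid to an
 * already-scanned btrfs_device
 */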
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
		if (!map->stripes[i].dev) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
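
/*
 * fill in the btrfs_device for one dev item from the chunk tree; if
 * the device was never seen during the scan, warn and create a stub
 * entry so the stripe lookups still resolve
 */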
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu not found already\n", devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;

	ret = btrfs_open_device(device);
	if (ret)
		return ret;

	return 0;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
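
/*
 * walk the sys_chunk_array in the superblock copy and map every system
 * chunk; this has to happen before the chunk tree itself can be read
 */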
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	/* after reading the device items, loop again to read the chunks */
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}