2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
22 #include "extent_map.h"
24 #include "transaction.h"
25 #include "print-tree.h"
29 struct btrfs_device *dev;
32 static DEFINE_MUTEX(uuid_mutex);
33 static LIST_HEAD(fs_uuids);
35 int btrfs_cleanup_fs_uuids(void)
37 struct btrfs_fs_devices *fs_devices;
38 struct list_head *uuid_cur;
39 struct list_head *devices_cur;
40 struct btrfs_device *dev;
42 list_for_each(uuid_cur, &fs_uuids) {
43 fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
45 while(!list_empty(&fs_devices->devices)) {
46 devices_cur = fs_devices->devices.next;
47 dev = list_entry(devices_cur, struct btrfs_device,
49 printk("uuid cleanup finds %s\n", dev->name);
52 close_bdev_excl(dev->bdev);
54 list_del(&dev->dev_list);
61 static struct btrfs_device *__find_device(struct list_head *head, u64 devid)
63 struct btrfs_device *dev;
64 struct list_head *cur;
66 list_for_each(cur, head) {
67 dev = list_entry(cur, struct btrfs_device, dev_list);
68 if (dev->devid == devid)
74 static struct btrfs_fs_devices *find_fsid(u8 *fsid)
76 struct list_head *cur;
77 struct btrfs_fs_devices *fs_devices;
79 list_for_each(cur, &fs_uuids) {
80 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
81 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
87 static int device_list_add(const char *path,
88 struct btrfs_super_block *disk_super,
89 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
91 struct btrfs_device *device;
92 struct btrfs_fs_devices *fs_devices;
93 u64 found_transid = btrfs_super_generation(disk_super);
95 fs_devices = find_fsid(disk_super->fsid);
97 fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
100 INIT_LIST_HEAD(&fs_devices->devices);
101 list_add(&fs_devices->list, &fs_uuids);
102 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
103 fs_devices->latest_devid = devid;
104 fs_devices->latest_trans = found_transid;
105 fs_devices->lowest_devid = (u64)-1;
106 fs_devices->num_devices = 0;
109 device = __find_device(&fs_devices->devices, devid);
112 device = kzalloc(sizeof(*device), GFP_NOFS);
114 /* we can safely leave the fs_devices entry around */
117 device->devid = devid;
118 device->name = kstrdup(path, GFP_NOFS);
123 list_add(&device->dev_list, &fs_devices->devices);
124 fs_devices->num_devices++;
127 if (found_transid > fs_devices->latest_trans) {
128 fs_devices->latest_devid = devid;
129 fs_devices->latest_trans = found_transid;
131 if (fs_devices->lowest_devid > devid) {
132 fs_devices->lowest_devid = devid;
133 printk("lowest devid now %Lu\n", devid);
135 *fs_devices_ret = fs_devices;
139 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
141 struct list_head *head = &fs_devices->devices;
142 struct list_head *cur;
143 struct btrfs_device *device;
145 mutex_lock(&uuid_mutex);
146 list_for_each(cur, head) {
147 device = list_entry(cur, struct btrfs_device, dev_list);
149 close_bdev_excl(device->bdev);
150 printk("close devices closes %s\n", device->name);
154 mutex_unlock(&uuid_mutex);
158 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
159 int flags, void *holder)
161 struct block_device *bdev;
162 struct list_head *head = &fs_devices->devices;
163 struct list_head *cur;
164 struct btrfs_device *device;
167 mutex_lock(&uuid_mutex);
168 list_for_each(cur, head) {
169 device = list_entry(cur, struct btrfs_device, dev_list);
170 bdev = open_bdev_excl(device->name, flags, holder);
171 printk("opening %s devid %Lu\n", device->name, device->devid);
173 printk("open %s failed\n", device->name);
177 if (device->devid == fs_devices->latest_devid)
178 fs_devices->latest_bdev = bdev;
179 if (device->devid == fs_devices->lowest_devid) {
180 fs_devices->lowest_bdev = bdev;
181 printk("lowest bdev %s\n", device->name);
185 mutex_unlock(&uuid_mutex);
188 mutex_unlock(&uuid_mutex);
189 btrfs_close_devices(fs_devices);
193 int btrfs_scan_one_device(const char *path, int flags, void *holder,
194 struct btrfs_fs_devices **fs_devices_ret)
196 struct btrfs_super_block *disk_super;
197 struct block_device *bdev;
198 struct buffer_head *bh;
202 mutex_lock(&uuid_mutex);
204 printk("scan one opens %s\n", path);
205 bdev = open_bdev_excl(path, flags, holder);
208 printk("open failed\n");
213 ret = set_blocksize(bdev, 4096);
216 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
221 disk_super = (struct btrfs_super_block *)bh->b_data;
222 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
223 sizeof(disk_super->magic))) {
224 printk("no btrfs found on %s\n", path);
228 devid = le64_to_cpu(disk_super->dev_item.devid);
229 printk("found device %Lu on %s\n", devid, path);
230 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
235 close_bdev_excl(bdev);
236 printk("scan one closes bdev %s\n", path);
238 mutex_unlock(&uuid_mutex);
243 * this uses a pretty simple search, the expectation is that it is
244 * called very infrequently and that a given device has a small number
247 static int find_free_dev_extent(struct btrfs_trans_handle *trans,
248 struct btrfs_device *device,
249 struct btrfs_path *path,
250 u64 num_bytes, u64 *start)
252 struct btrfs_key key;
253 struct btrfs_root *root = device->dev_root;
254 struct btrfs_dev_extent *dev_extent = NULL;
257 u64 search_start = 0;
258 u64 search_end = device->total_bytes;
262 struct extent_buffer *l;
267 /* FIXME use last free of some kind */
269 /* we don't want to overwrite the superblock on the drive,
270 * so we make sure to start at an offset of at least 1MB
272 search_start = max((u64)1024 * 1024, search_start);
273 key.objectid = device->devid;
274 key.offset = search_start;
275 key.type = BTRFS_DEV_EXTENT_KEY;
276 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
279 ret = btrfs_previous_item(root, path, 0, key.type);
283 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
286 slot = path->slots[0];
287 if (slot >= btrfs_header_nritems(l)) {
288 ret = btrfs_next_leaf(root, path);
295 if (search_start >= search_end) {
299 *start = search_start;
303 *start = last_byte > search_start ?
304 last_byte : search_start;
305 if (search_end <= *start) {
311 btrfs_item_key_to_cpu(l, &key, slot);
313 if (key.objectid < device->devid)
316 if (key.objectid > device->devid)
319 if (key.offset >= search_start && key.offset > last_byte &&
321 if (last_byte < search_start)
322 last_byte = search_start;
323 hole_size = key.offset - last_byte;
324 if (key.offset > last_byte &&
325 hole_size >= num_bytes) {
330 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
335 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
336 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
342 /* we have to make sure we didn't find an extent that has already
343 * been allocated by the map tree or the original allocation
345 btrfs_release_path(root, path);
346 BUG_ON(*start < search_start);
348 if (*start + num_bytes > search_end) {
352 /* check for pending inserts here */
356 btrfs_release_path(root, path);
360 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
361 struct btrfs_device *device,
362 u64 owner, u64 num_bytes, u64 *start)
365 struct btrfs_path *path;
366 struct btrfs_root *root = device->dev_root;
367 struct btrfs_dev_extent *extent;
368 struct extent_buffer *leaf;
369 struct btrfs_key key;
371 path = btrfs_alloc_path();
375 ret = find_free_dev_extent(trans, device, path, num_bytes, start);
380 key.objectid = device->devid;
382 key.type = BTRFS_DEV_EXTENT_KEY;
383 ret = btrfs_insert_empty_item(trans, root, path, &key,
387 leaf = path->nodes[0];
388 extent = btrfs_item_ptr(leaf, path->slots[0],
389 struct btrfs_dev_extent);
390 btrfs_set_dev_extent_owner(leaf, extent, owner);
391 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
392 btrfs_mark_buffer_dirty(leaf);
394 btrfs_free_path(path);
398 static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
400 struct btrfs_path *path;
402 struct btrfs_key key;
403 struct btrfs_key found_key;
405 path = btrfs_alloc_path();
408 key.objectid = (u64)-1;
409 key.offset = (u64)-1;
410 key.type = BTRFS_CHUNK_ITEM_KEY;
412 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
418 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
422 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
424 *objectid = found_key.objectid + found_key.offset;
428 btrfs_free_path(path);
432 static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
436 struct btrfs_key key;
437 struct btrfs_key found_key;
439 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
440 key.type = BTRFS_DEV_ITEM_KEY;
441 key.offset = (u64)-1;
443 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
449 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
454 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
456 *objectid = found_key.offset + 1;
460 btrfs_release_path(root, path);
465 * the device information is stored in the chunk root
466 * the btrfs_device struct should be fully filled in
468 int btrfs_add_device(struct btrfs_trans_handle *trans,
469 struct btrfs_root *root,
470 struct btrfs_device *device)
473 struct btrfs_path *path;
474 struct btrfs_dev_item *dev_item;
475 struct extent_buffer *leaf;
476 struct btrfs_key key;
480 root = root->fs_info->chunk_root;
482 path = btrfs_alloc_path();
486 ret = find_next_devid(root, path, &free_devid);
490 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
491 key.type = BTRFS_DEV_ITEM_KEY;
492 key.offset = free_devid;
494 ret = btrfs_insert_empty_item(trans, root, path, &key,
499 leaf = path->nodes[0];
500 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
502 device->devid = free_devid;
503 btrfs_set_device_id(leaf, dev_item, device->devid);
504 btrfs_set_device_type(leaf, dev_item, device->type);
505 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
506 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
507 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
508 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
509 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
511 ptr = (unsigned long)btrfs_device_uuid(dev_item);
512 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
513 btrfs_mark_buffer_dirty(leaf);
517 btrfs_free_path(path);
520 int btrfs_update_device(struct btrfs_trans_handle *trans,
521 struct btrfs_device *device)
524 struct btrfs_path *path;
525 struct btrfs_root *root;
526 struct btrfs_dev_item *dev_item;
527 struct extent_buffer *leaf;
528 struct btrfs_key key;
530 root = device->dev_root->fs_info->chunk_root;
532 path = btrfs_alloc_path();
536 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
537 key.type = BTRFS_DEV_ITEM_KEY;
538 key.offset = device->devid;
540 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
549 leaf = path->nodes[0];
550 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
552 btrfs_set_device_id(leaf, dev_item, device->devid);
553 btrfs_set_device_type(leaf, dev_item, device->type);
554 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
555 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
556 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
557 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
558 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
559 btrfs_mark_buffer_dirty(leaf);
562 btrfs_free_path(path);
566 int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
567 struct btrfs_root *root,
568 struct btrfs_key *key,
569 struct btrfs_chunk *chunk, int item_size)
571 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
572 struct btrfs_disk_key disk_key;
576 array_size = btrfs_super_sys_array_size(super_copy);
577 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
580 ptr = super_copy->sys_chunk_array + array_size;
581 btrfs_cpu_key_to_disk(&disk_key, key);
582 memcpy(ptr, &disk_key, sizeof(disk_key));
583 ptr += sizeof(disk_key);
584 memcpy(ptr, chunk, item_size);
585 item_size += sizeof(disk_key);
586 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
590 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
591 struct btrfs_root *extent_root, u64 *start,
592 u64 *num_bytes, u64 type)
595 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
596 struct btrfs_stripe *stripes;
597 struct btrfs_device *device = NULL;
598 struct btrfs_chunk *chunk;
599 struct list_head private_devs;
600 struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
601 struct list_head *cur;
602 struct extent_map_tree *em_tree;
603 struct map_lookup *map;
604 struct extent_map *em;
606 u64 calc_size = 1024 * 1024 * 1024;
613 struct btrfs_key key;
615 if (list_empty(dev_list))
618 INIT_LIST_HEAD(&private_devs);
619 cur = dev_list->next;
621 /* build a private list of devices we will allocate from */
622 while(index < num_stripes) {
623 device = list_entry(cur, struct btrfs_device, dev_list);
624 avail = device->total_bytes - device->bytes_used;
626 if (avail > max_avail)
628 if (avail >= calc_size) {
629 list_move_tail(&device->dev_list, &private_devs);
635 if (index < num_stripes) {
636 list_splice(&private_devs, dev_list);
637 if (!looped && max_avail > 0) {
639 calc_size = max_avail;
645 ret = find_next_chunk(chunk_root, &key.objectid);
649 chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
653 stripes = &chunk->stripe;
655 *num_bytes = calc_size;
657 while(index < num_stripes) {
658 BUG_ON(list_empty(&private_devs));
659 cur = private_devs.next;
660 device = list_entry(cur, struct btrfs_device, dev_list);
661 list_move_tail(&device->dev_list, dev_list);
663 ret = btrfs_alloc_dev_extent(trans, device,
665 calc_size, &dev_offset);
667 printk("alloc chunk size %Lu from dev %Lu\n", calc_size, device->devid);
668 device->bytes_used += calc_size;
669 ret = btrfs_update_device(trans, device);
672 btrfs_set_stack_stripe_devid(stripes + index, device->devid);
673 btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
674 physical = dev_offset;
677 BUG_ON(!list_empty(&private_devs));
679 /* key.objectid was set above */
680 key.offset = *num_bytes;
681 key.type = BTRFS_CHUNK_ITEM_KEY;
682 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
683 btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
684 btrfs_set_stack_chunk_type(chunk, type);
685 btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
686 btrfs_set_stack_chunk_io_align(chunk, extent_root->sectorsize);
687 btrfs_set_stack_chunk_io_width(chunk, extent_root->sectorsize);
688 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
690 ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
691 btrfs_chunk_item_size(num_stripes));
693 *start = key.objectid;
695 em = alloc_extent_map(GFP_NOFS);
698 map = kmalloc(sizeof(*map), GFP_NOFS);
704 em->bdev = (struct block_device *)map;
705 em->start = key.objectid;
706 em->len = key.offset;
709 map->physical = physical;
719 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
720 spin_lock(&em_tree->lock);
721 ret = add_extent_mapping(em_tree, em);
723 spin_unlock(&em_tree->lock);
728 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
730 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
733 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
735 struct extent_map *em;
738 spin_lock(&tree->map_tree.lock);
739 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
741 remove_extent_mapping(&tree->map_tree, em);
742 spin_unlock(&tree->map_tree.lock);
748 /* once for the tree */
753 int btrfs_map_block(struct btrfs_mapping_tree *map_tree,
754 u64 logical, u64 *phys, u64 *length,
755 struct btrfs_device **dev)
757 struct extent_map *em;
758 struct map_lookup *map;
759 struct extent_map_tree *em_tree = &map_tree->map_tree;
763 spin_lock(&em_tree->lock);
764 em = lookup_extent_mapping(em_tree, logical, *length);
767 BUG_ON(em->start > logical || em->start + em->len < logical);
768 map = (struct map_lookup *)em->bdev;
769 offset = logical - em->start;
770 *phys = map->physical + offset;
771 *length = em->len - offset;
774 spin_unlock(&em_tree->lock);
778 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio)
780 struct btrfs_mapping_tree *map_tree;
781 struct btrfs_device *dev;
782 u64 logical = bio->bi_sector << 9;
786 struct bio_vec *bvec;
790 bio_for_each_segment(bvec, bio, i) {
791 length += bvec->bv_len;
793 map_tree = &root->fs_info->mapping_tree;
795 ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev);
796 if (map_length < length) {
797 printk("mapping failed logical %Lu bio len %Lu physical %Lu "
798 "len %Lu\n", logical, length, physical, map_length);
801 BUG_ON(map_length < length);
802 bio->bi_sector = physical >> 9;
803 bio->bi_bdev = dev->bdev;
808 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
810 struct list_head *head = &root->fs_info->fs_devices->devices;
812 return __find_device(head, devid);
815 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
816 struct extent_buffer *leaf,
817 struct btrfs_chunk *chunk)
819 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
820 struct map_lookup *map;
821 struct extent_map *em;
827 logical = key->objectid;
828 length = key->offset;
829 spin_lock(&map_tree->map_tree.lock);
830 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
832 /* already mapped? */
833 if (em && em->start <= logical && em->start + em->len > logical) {
835 spin_unlock(&map_tree->map_tree.lock);
840 spin_unlock(&map_tree->map_tree.lock);
842 map = kzalloc(sizeof(*map), GFP_NOFS);
846 em = alloc_extent_map(GFP_NOFS);
849 map = kmalloc(sizeof(*map), GFP_NOFS);
855 em->bdev = (struct block_device *)map;
860 map->physical = btrfs_stripe_offset_nr(leaf, chunk, 0);
861 devid = btrfs_stripe_devid_nr(leaf, chunk, 0);
862 map->dev = btrfs_find_device(root, devid);
869 spin_lock(&map_tree->map_tree.lock);
870 ret = add_extent_mapping(&map_tree->map_tree, em);
872 spin_unlock(&map_tree->map_tree.lock);
878 static int fill_device_from_item(struct extent_buffer *leaf,
879 struct btrfs_dev_item *dev_item,
880 struct btrfs_device *device)
884 device->devid = btrfs_device_id(leaf, dev_item);
885 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
886 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
887 device->type = btrfs_device_type(leaf, dev_item);
888 device->io_align = btrfs_device_io_align(leaf, dev_item);
889 device->io_width = btrfs_device_io_width(leaf, dev_item);
890 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
892 ptr = (unsigned long)btrfs_device_uuid(dev_item);
893 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
898 static int read_one_dev(struct btrfs_root *root,
899 struct extent_buffer *leaf,
900 struct btrfs_dev_item *dev_item)
902 struct btrfs_device *device;
906 devid = btrfs_device_id(leaf, dev_item);
907 device = btrfs_find_device(root, devid);
909 printk("warning devid %Lu not found already\n", devid);
910 device = kmalloc(sizeof(*device), GFP_NOFS);
913 list_add(&device->dev_list,
914 &root->fs_info->fs_devices->devices);
917 fill_device_from_item(leaf, dev_item, device);
918 device->dev_root = root->fs_info->dev_root;
921 ret = btrfs_open_device(device);
929 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
931 struct btrfs_dev_item *dev_item;
933 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
935 return read_one_dev(root, buf, dev_item);
938 int btrfs_read_sys_array(struct btrfs_root *root)
940 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
941 struct extent_buffer *sb = root->fs_info->sb_buffer;
942 struct btrfs_disk_key *disk_key;
943 struct btrfs_chunk *chunk;
944 struct btrfs_key key;
949 unsigned long sb_ptr;
953 array_size = btrfs_super_sys_array_size(super_copy);
956 * we do this loop twice, once for the device items and
957 * once for all of the chunks. This way there are device
958 * structs filled in for every chunk
960 ptr = super_copy->sys_chunk_array;
961 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
964 while (cur < array_size) {
965 disk_key = (struct btrfs_disk_key *)ptr;
966 btrfs_disk_key_to_cpu(&key, disk_key);
968 len = sizeof(*disk_key);
973 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
974 chunk = (struct btrfs_chunk *)sb_ptr;
975 ret = read_one_chunk(root, &key, sb, chunk);
977 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
978 len = btrfs_chunk_item_size(num_stripes);
989 int btrfs_read_chunk_tree(struct btrfs_root *root)
991 struct btrfs_path *path;
992 struct extent_buffer *leaf;
993 struct btrfs_key key;
994 struct btrfs_key found_key;
998 root = root->fs_info->chunk_root;
1000 path = btrfs_alloc_path();
1004 /* first we search for all of the device items, and then we
1005 * read in all of the chunk items. This way we can create chunk
1006 * mappings that reference all of the devices that are afound
1008 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1012 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1014 leaf = path->nodes[0];
1015 slot = path->slots[0];
1016 if (slot >= btrfs_header_nritems(leaf)) {
1017 ret = btrfs_next_leaf(root, path);
1024 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1025 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
1026 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
1028 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
1029 struct btrfs_dev_item *dev_item;
1030 dev_item = btrfs_item_ptr(leaf, slot,
1031 struct btrfs_dev_item);
1032 ret = read_one_dev(root, leaf, dev_item);
1035 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
1036 struct btrfs_chunk *chunk;
1037 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
1038 ret = read_one_chunk(root, &found_key, leaf, chunk);
1042 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
1044 btrfs_release_path(root, path);
1048 btrfs_free_path(path);