Btrfs: Make the FS tree the last objectid in the tree of tree roots
[linux-block.git] / fs / btrfs / volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

struct map_lookup {
	struct btrfs_device *dev;
	u64 physical;
};

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

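/*
 * find a free area of at least num_bytes on the device and record it
 * with a new dev extent item in the device tree.  On success *start
 * holds the byte offset of the new extent on the device.
 */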
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 owner, u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_owner(leaf, extent, owner);
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

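/*
 * find the logical offset just past the last chunk item in the chunk
 * tree; this is where the next chunk can be placed.  *objectid is set
 * to 0 when no chunk items exist yet.
 */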
static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = (u64)-1;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*objectid = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.objectid + found_key.offset;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

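/*
 * return one past the highest devid currently recorded as a dev item,
 * or 1 when no dev items exist yet
 */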
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item) + device->name_len);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_rdev(leaf, dev_item, device->rdev);
	btrfs_set_device_partition(leaf, dev_item, device->partition);
	btrfs_set_device_name_len(leaf, dev_item, device->name_len);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);

	ptr = (unsigned long)btrfs_device_name(dev_item);
	write_extent_buffer(leaf, device->name, ptr, device->name_len);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
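
/*
 * write the current in-memory fields of the device back into its
 * dev item in the chunk tree
 */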
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_rdev(leaf, dev_item, device->rdev);
	btrfs_set_device_partition(leaf, dev_item, device->partition);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

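/*
 * append a (disk key, chunk item) pair to the superblock's
 * sys_chunk_array so system chunks can be mapped before the chunk
 * tree itself is readable
 */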
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	/* the disk key is written in front of the chunk, so it must fit too */
	if (array_size + item_size + sizeof(disk_key) >
	    BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

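/*
 * allocate a new chunk: pick devices with enough free space, reserve
 * a dev extent on each of them, insert the chunk item into the chunk
 * tree and cache the mapping in the in-memory mapping tree.  A 1GB
 * chunk is tried first, falling back once to the largest free area
 * seen.
 */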
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 avail;
	u64 max_avail = 0;
	int num_stripes = 1;
	int looped = 0;
	int ret;
	int index;
	struct btrfs_key key;

	if (list_empty(dev_list))
		return -ENOSPC;
again:
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;
	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail > max_avail)
			max_avail = avail;
		if (avail >= calc_size) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
		}
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}

	ret = find_next_chunk(chunk_root, &key.objectid);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	stripes = &chunk->stripe;

	*num_bytes = calc_size;
	index = 0;
	while(index < num_stripes) {
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);
		list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
					     key.objectid,
					     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		btrfs_set_stack_stripe_devid(stripes + index, device->devid);
		btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key.objectid was set above */
	key.offset = *num_bytes;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.objectid;

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(chunk);
		return -ENOMEM;
	}
	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		kfree(chunk);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = key.objectid;
	em->len = key.offset;
	em->block_start = 0;

	map->physical = physical;
	map->dev = device;

	if (!map->dev) {
		kfree(map);
		free_extent_map(em);
		kfree(chunk);
		return -EIO;
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	BUG_ON(ret);
	spin_unlock(&em_tree->lock);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

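/*
 * drop every cached chunk mapping; em->bdev really stores the
 * map_lookup struct, so free that as well
 */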
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

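/*
 * translate a logical byte offset into a physical offset and device.
 * *length is trimmed so the returned range does not cross the end of
 * the chunk.
 */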
int btrfs_map_block(struct btrfs_mapping_tree *map_tree,
		    u64 logical, u64 *phys, u64 *length,
		    struct btrfs_device **dev)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;
	*phys = map->physical + offset;
	*length = em->len - offset;
	*dev = map->dev;
	free_extent_map(em);
	spin_unlock(&em_tree->lock);
	return 0;
}

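/*
 * remap a bio from the logical address space to its physical location
 * on the underlying device and submit it.  The bio must not span a
 * chunk boundary.
 */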
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	u64 logical = bio->bi_sector << 9;
	u64 physical;
	u64 length = 0;
	u64 map_length;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		length += bvec->bv_len;
	}
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev);
	BUG_ON(map_length < length);
	bio->bi_sector = physical >> 9;
	bio->bi_bdev = dev->bdev;
	submit_bio(rw, bio);
	return 0;
}

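/* simple linear scan of the per-fs device list for a matching devid */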
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
{
	struct btrfs_device *dev;
	struct list_head *cur = root->fs_info->devices.next;
	struct list_head *head = &root->fs_info->devices;

	while(cur != head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid)
			return dev;
		cur = cur->next;
	}
	return NULL;
}

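/*
 * build the in-memory extent_map/map_lookup pair for one on-disk
 * chunk item, unless the chunk is already present in the mapping tree
 */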
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	int ret;

	logical = key->objectid;
	length = key->offset;
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		spin_unlock(&map_tree->map_tree.lock);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}
	spin_unlock(&map_tree->map_tree.lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->physical = btrfs_stripe_offset_nr(leaf, chunk, 0);
	devid = btrfs_stripe_devid_nr(leaf, chunk, 0);
	map->dev = btrfs_find_device(root, devid);
	if (!map->dev) {
		kfree(map);
		free_extent_map(em);
		return -EIO;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	BUG_ON(ret);
	spin_unlock(&map_tree->map_tree.lock);
	free_extent_map(em);

	return 0;
}

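/*
 * copy the fields of an on-disk dev item into the in-memory device
 * struct, duplicating the name into a NUL-terminated buffer
 */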
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;
	char *name;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	device->rdev = btrfs_device_rdev(leaf, dev_item);
	device->partition = btrfs_device_partition(leaf, dev_item);
	device->name_len = btrfs_device_name_len(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);

	name = kmalloc(device->name_len + 1, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	device->name = name;
	ptr = (unsigned long)btrfs_device_name(dev_item);
	read_extent_buffer(leaf, name, ptr, device->name_len);
	name[device->name_len] = '\0';
	return 0;
}

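/*
 * find or create the in-memory btrfs_device for a dev item and
 * refresh its fields from the leaf
 */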
static int read_one_dev(struct btrfs_root *root, struct btrfs_key *key,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;

	devid = btrfs_device_id(leaf, dev_item);
	device = btrfs_find_device(root, devid);
	if (!device) {
		device = kmalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list, &root->fs_info->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = root->fs_info->sb->s_bdev;
	memcpy(&device->dev_key, key, sizeof(*key));
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret) {
		kfree(device);
	}
#endif
	return ret;
}

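/*
 * read device items and chunk items out of the copy of the chunk
 * array stored in the super block, so the mappings needed to read the
 * chunk tree itself are available at mount time
 */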
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_dev_item *dev_item;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;
	int dev_only = 1;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
again:
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID &&
		    key.type == BTRFS_DEV_ITEM_KEY) {
			dev_item = (struct btrfs_dev_item *)sb_ptr;
			if (dev_only) {
				ret = read_one_dev(root, &key, sb, dev_item);
				BUG_ON(ret);
			}
			len = sizeof(*dev_item);
			len += btrfs_device_name_len(sb, dev_item);
		} else if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			if (!dev_only) {
				ret = read_one_chunk(root, &key, sb, chunk);
				BUG_ON(ret);
			}
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	if (dev_only == 1) {
		dev_only = 0;
		goto again;
	}
	return 0;
}

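/*
 * read all device items and chunk items from the chunk tree and set
 * up the corresponding in-memory devices and chunk mappings
 */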
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, &found_key, leaf,
						   dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}