Btrfs: Bring back mount -o ssd optimizations
[linux-block.git] / fs / btrfs / volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

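/*
 * a map_lookup records the chunk -> physical device translation for
 * one chunk; a pointer to it is stashed in em->bdev of the chunk's
 * extent_map
 */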
struct map_lookup {
	struct btrfs_device *dev;
	u64 physical;
};

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

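/*
 * find a free area of at least num_bytes on the device and record it
 * in the device tree as a new dev extent owned by @owner
 */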
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 owner, u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_owner(leaf, extent, owner);
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

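/*
 * walk backwards through the chunk tree to find the end of the last
 * allocated chunk; new chunks are allocated starting there
 */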
static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = (u64)-1;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*objectid = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.objectid + found_key.offset;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

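/*
 * find the next available device id, one more than the highest dev
 * item currently in the chunk tree; device ids start at 1
 */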
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
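
/*
 * write the in-memory fields of a btrfs_device back to its matching
 * dev item in the chunk tree
 */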
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

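/*
 * append a (key, chunk item) pair to the sys_chunk_array in the super
 * block copy, so system chunks can be found before any trees are read
 */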
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	/* leave room for the disk key that precedes the chunk item */
	if (array_size + item_size + sizeof(disk_key) >
	    BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

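/*
 * allocate a new chunk: pick devices with enough free space, carve a
 * dev extent out of each one, then record the chunk in the chunk tree
 * and in the in-memory mapping tree
 */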
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 avail;
	u64 max_avail = 0;
	int num_stripes = 1;
	int looped = 0;
	int ret;
	int index;
	struct btrfs_key key;

	if (list_empty(dev_list))
		return -ENOSPC;
again:
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;
	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail > max_avail)
			max_avail = avail;
		if (avail >= calc_size) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
		}
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (!looped && max_avail > 0) {
			/* retry once with the largest hole we found */
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}

	ret = find_next_chunk(chunk_root, &key.objectid);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	stripes = &chunk->stripe;

	*num_bytes = calc_size;
	index = 0;
	while(index < num_stripes) {
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);
		list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
					     key.objectid,
					     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		btrfs_set_stack_stripe_devid(stripes + index, device->devid);
		btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key.objectid was set above */
	key.offset = *num_bytes;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.objectid;

	/* the chunk item is on disk, so the stack copy is no longer needed */
	kfree(chunk);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = key.objectid;
	em->len = key.offset;
	em->block_start = 0;

	map->physical = physical;
	map->dev = device;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	BUG_ON(ret);
	spin_unlock(&em_tree->lock);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

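/*
 * empty the chunk mapping tree, freeing the map_lookup structs hidden
 * behind em->bdev along the way
 */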
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

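/*
 * translate a logical address into a physical offset and device;
 * *length is clamped to what remains of the mapped extent
 */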
int btrfs_map_block(struct btrfs_mapping_tree *map_tree,
		    u64 logical, u64 *phys, u64 *length,
		    struct btrfs_device **dev)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;
	*phys = map->physical + offset;
	*length = em->len - offset;
	*dev = map->dev;
	free_extent_map(em);
	spin_unlock(&em_tree->lock);
	return 0;
}

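/*
 * remap a bio from logical to physical and submit it to the right
 * device; the bio must not span more than one chunk mapping
 */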
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 physical;
	u64 length = 0;
	u64 map_length;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		length += bvec->bv_len;
	}
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev);
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu physical %Lu "
		       "len %Lu\n", logical, length, physical, map_length);
		BUG();
	}
	bio->bi_sector = physical >> 9;
	bio->bi_bdev = dev->bdev;
	submit_bio(rw, bio);
	return 0;
}

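/*
 * simple linear scan of the fs_info device list for a matching devid
 */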
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
{
	struct btrfs_device *dev;
	struct list_head *cur = root->fs_info->devices.next;
	struct list_head *head = &root->fs_info->devices;

	while(cur != head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid)
			return dev;
		cur = cur->next;
	}
	return NULL;
}

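/*
 * read a chunk item (from the chunk tree or the sys array) and add
 * the corresponding mapping to the in-memory mapping tree, unless it
 * is already there
 */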
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	int ret;

	logical = key->objectid;
	length = key->offset;
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		spin_unlock(&map_tree->map_tree.lock);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}
	spin_unlock(&map_tree->map_tree.lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->physical = btrfs_stripe_offset_nr(leaf, chunk, 0);
	devid = btrfs_stripe_devid_nr(leaf, chunk, 0);
	map->dev = btrfs_find_device(root, devid);
	if (!map->dev) {
		kfree(map);
		free_extent_map(em);
		return -EIO;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	BUG_ON(ret);
	spin_unlock(&map_tree->map_tree.lock);
	free_extent_map(em);

	return 0;
}

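/*
 * copy the fields of an on-disk dev item into an in-memory
 * btrfs_device
 */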
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);

	return 0;
}

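/*
 * read a dev item and fill in the matching in-memory btrfs_device,
 * adding it to the fs_info device list if it is new
 */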
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;

	devid = btrfs_device_id(leaf, dev_item);
	device = btrfs_find_device(root, devid);
	if (!device) {
		device = kmalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list, &root->fs_info->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = root->fs_info->sb->s_bdev;
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret) {
		kfree(device);
	}
#endif
	return ret;
}

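/*
 * read the dev item embedded directly in the super block
 */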
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

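/*
 * walk the sys_chunk_array in the super block and insert the system
 * chunk mappings; this bootstraps the mapping tree so the chunk tree
 * itself can be read
 */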
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * the sys_chunk_array packs (disk key, chunk item) pairs back to
	 * back; walk the array and insert a mapping for each chunk
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return 0;
}

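/*
 * read every device item and chunk item out of the chunk tree at
 * mount time: devices first, then chunks, so each chunk mapping can
 * find its device
 */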
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}