BTRFS_IOC_TRANS_START should be privileged
linux-2.6-block.git: fs/btrfs/volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

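/*
 * in-memory mirror of the stripe layout described by an on-disk
 * struct btrfs_chunk.  A pointer to one of these is stashed behind
 * the em->bdev field of every extent_map in the chunk mapping tree.
 */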
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
				fs_devices->open_devices--;
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run = 0;

	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
loop:
	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (pending) {
		again = 1;
		device->running_pending = 1;
	} else {
		again = 0;
		device->running_pending = 0;
	}
	spin_unlock(&device->io_lock);

	while(pending) {
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&device->dev_root->fs_info->nr_async_submits);
		submit_bio(cur->bi_rw, cur);
		num_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && num_run && bdi_write_congested(bdi)) {
			struct bio *old_head;

			spin_lock(&device->io_lock);
			old_head = device->pending_bios;
			device->pending_bios = pending;
			if (device->pending_bio_tail)
				tail->bi_next = old_head;
			else
				device->pending_bio_tail = tail;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}
	if (again)
		goto loop;
done:
	return 0;
}

void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

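/*
 * record a device noticed during a scan.  Devices are grouped into
 * btrfs_fs_devices structs by fsid; the first scan of a given fsid
 * allocates the struct, and latest_devid/latest_trans track whichever
 * member device carries the highest generation number.
 */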
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
again:
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->in_fs_metadata) {
			if (device->bdev) {
				close_bdev_excl(device->bdev);
				fs_devices->open_devices--;
			}
			list_del(&device->dev_list);
			list_del(&device->dev_alloc_list);
			fs_devices->num_devices--;
			kfree(device->name);
			kfree(device);
			goto again;
		}
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			fs_devices->open_devices--;
		}
		device->bdev = NULL;
		device->in_fs_metadata = 0;
	}
	fs_devices->mounted = 0;
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 transid;
	u64 devid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	if (fs_devices->mounted)
		goto out;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			continue;

		if (!device->name)
			continue;

		bdev = open_bdev_excl(device->name, flags, holder);

		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic)))
			goto error_brelse;

		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		transid = btrfs_super_generation(disk_super);
		if (!latest_transid || transid > latest_transid) {
			latest_devid = devid;
			latest_transid = transid;
			latest_bdev = bdev;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		fs_devices->open_devices++;
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_excl(bdev);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->mounted = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}

int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

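/*
 * find the dev extent item covering 'start', subtract its length from
 * the device's bytes_used count and delete the item from the device tree
 */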
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

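/*
 * carve num_bytes out of a free hole on the device and insert the
 * matching dev extent item, pointing it back at the chunk that owns
 * it.  The offset chosen is returned through *start
 */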
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret) {
		goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

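/*
 * look up the last chunk item belonging to 'objectid' and return the
 * logical offset just past it, i.e. where the next chunk can start
 */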
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

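/*
 * return one more than the highest devid currently stored in the dev
 * items, starting from 1 when none exist yet
 */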
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

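/*
 * remove the dev item for this device from the chunk root, unhook the
 * device from the in-memory lists and drop the device count kept in
 * the super block copy.  This runs (and commits) its own transaction
 */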
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret = 0;

	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *cur;
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		list_for_each(cur, devices) {
			tmp = list_entry(cur, struct btrfs_device, dev_list);
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk("btrfs: no missing devices found to remove\n");
			goto out;
		}

	} else {
		bdev = open_bdev_excl(device_path, 0,
				      root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic))) {
			ret = -ENOENT;
			goto error_brelse;
		}
		if (memcmp(disk_super->fsid, root->fs_info->fsid,
			   BTRFS_FSID_SIZE)) {
			ret = -ENOENT;
			goto error_brelse;
		}
		devid = le64_to_cpu(disk_super->dev_item.devid);
		device = btrfs_find_device(root, devid, NULL);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}

	}
	root->fs_info->fs_devices->num_devices--;
	root->fs_info->fs_devices->open_devices--;

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;


	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	if (bh) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);

		brelse(bh);
	}

	if (device->bdev) {
		/* one close for the device struct or super_block */
		close_bdev_excl(device->bdev);
	}
	if (bdev) {
		/* one close for us */
		close_bdev_excl(bdev);
	}
	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_excl(bdev);
out:
	mutex_unlock(&uuid_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	/* open_bdev_excl returns an ERR_PTR, never NULL */
	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		return PTR_ERR(bdev);
	}
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
out:
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	goto out;
}

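/*
 * write the in-memory fields of a btrfs_device back into its dev item
 * in the chunk root
 */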
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

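/*
 * drop one chunk entry from the sys_chunk_array held in the super block
 * copy by sliding the rest of the array down over it
 */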
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

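/*
 * empty a chunk and throw it away: relocate any extents still living in
 * it, then free each stripe's dev extent, delete the chunk item (plus
 * the sys_chunk_array entry for system chunks) and drop the chunk from
 * the mapping tree
 */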
int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk("btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	btrfs_end_transaction(trans, root);
	return 0;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

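/*
 * balance rewrites everything through the allocator: step one shrinks
 * and re-grows each device to leave some free room on it, step two
 * walks the chunk tree from the highest offset down and relocates each
 * chunk (except chunk zero) so its data is spread over the current set
 * of devices
 */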
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	dev_root = dev_root->fs_info->dev_root;

	mutex_lock(&dev_root->fs_info->fs_mutex);
	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while(1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;
		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
		btrfs_release_path(chunk_root, path);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->fs_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}

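/*
 * append a chunk item and its key to the sys_chunk_array embedded in
 * the super block copy; system chunks have to be findable from the
 * super block alone, before any of the trees can be read
 */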
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

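/*
 * how much logical address space a chunk covers for a given per-device
 * allocation size: mirrored groups (raid1/dup) expose a single copy,
 * raid10 exposes calc_size * (num_stripes / sub_stripes), and striped
 * or single groups expose calc_size * num_stripes
 */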
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

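/*
 * allocate a new chunk: size the stripes for the requested raid type,
 * gather enough devices with room for a stripe, carve a dev extent out
 * of each one, then insert the chunk item and its extent_map.  If the
 * first pass can't find enough devices it retries with fewer stripes
 * or a smaller calc_size
 */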
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
			    extent_root->fs_info->fs_devices->open_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
			     chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

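/*
 * number of copies kept of the data at 'logical': every stripe for
 * raid1/dup, sub_stripes for raid10 and a single copy for everything
 * else
 */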
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

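/*
 * map a logical byte range onto physical stripes.  *length is clipped
 * to what fits in a single stripe, and a btrfs_multi_bio is filled in
 * with one (physical, device) pair per mirror that has to see the io.
 * Reads pick a single live mirror; writes fan out to every copy.  When
 * unplug_page is set no mapping is returned and the matching backing
 * devices are unplugged instead.
 *
 * as a worked example, take a 4-device raid10 chunk with 64k stripes
 * (sub_stripes 2, so factor = 4 / 2 = 2) and a read at offset 192k
 * into the chunk: stripe_nr = 192k / 64k = 3 and stripe_offset = 0,
 * then stripe_index = (3 % 2) * sub_stripes = 2 while stripe_nr
 * becomes 3 / 2 = 1, i.e. the io lands on the mirror pair at
 * stripes[2]/stripes[3], 64k into their device extents
 */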
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page) {
		/* unplug callers pass a NULL multi_ret, so multi is NULL
		 * here today; free it anyway so this early return can
		 * never leak
		 */
		kfree(multi);
		return 0;
	}

	if (!em) {
		printk("unable to find logical %Lu len %Lu\n", logical, *length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	/* test the write bit instead of rw == WRITE so extra bio flags
	 * can't make us skip the resize
	 */
	if (multi_ret && (rw & (1 << BIO_RW)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
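	/*
	 * Worked example (hypothetical numbers): with stripe_len = 64K and
	 * offset = 300K, do_div leaves stripe_nr = 4 (we stride past four
	 * full stripes), so stripe_offset ends up 300K - 4 * 64K = 44K
	 * into the fifth stripe.
	 */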

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					      map->num_stripes,
					      current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;
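		/*
		 * Worked example (hypothetical numbers): with num_stripes = 4
		 * and sub_stripes = 2, factor is 2.  A stripe_nr of 5 becomes
		 * stripe_nr = 2 with remainder 1, so stripe_index = 1 * 2 = 2,
		 * the second mirror pair in the stripe array.
		 */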

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn) {
					bdi->unplug_io_fn(bdi, unplug_page);
				}
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

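/*
 * Completion handler shared by all the cloned bios for one logical bio.
 * Each clone drops stripes_pending; the last one to finish restores the
 * caller's bi_private/bi_end_io and ends the original bio, reporting an
 * error only if more stripes failed than max_errors tolerates.
 */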
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		atomic_inc(&multi->error);

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
		 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		submit_bio(rw, bio);
		return 0;
	}

	/*
	 * nr_async_submits allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_submits);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->workers, &device->work);
	return 0;
}

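/*
 * Map a logical bio onto the physical stripes it covers: look up the
 * chunk mapping, clone the bio once per extra stripe, and submit each
 * clone (directly or via schedule_bio), with end_bio_multi_stripe
 * tracking completion of the whole group.
 */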
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
			bio_endio(bio, bio->bi_size, -EIO);
#else
			bio_endio(bio, -EIO);
#endif
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}

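/*
 * Create a stand-in btrfs_device for a devid that appears in the
 * metadata but has no disk present, so a filesystem mounted with
 * -o degraded can keep going; IO aimed at it fails in btrfs_map_bio.
 */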
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &fs_devices->alloc_list);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}

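/*
 * Turn one on-disk chunk item into a map_lookup and insert it into the
 * mapping tree, resolving each stripe's devid/uuid to a btrfs_device
 * (or a stand-in from add_missing_dev when mounted degraded).
 */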
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* a single allocation sized for num_stripes; an earlier fixed-size
	 * kzalloc here was redundant and leaked
	 */
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);

		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning: devid %Lu missing\n", devid);
		device = add_missing_dev(root, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	return 0;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

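/*
 * The superblock embeds a small array of (disk key, chunk item) pairs
 * in sys_chunk_array so the system chunks can be mapped before any
 * tree blocks are read.  Copy the superblock into a dummy extent
 * buffer and feed each pair to read_one_chunk.
 */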
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	btrfs_free_path(path);
	ret = 0;
error:
	return ret;
}