Btrfs: fill UUID tree initially
fs/btrfs/volumes.c (linux-2.6-block.git)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <linux/semaphore.h>
30 #include <asm/div64.h>
31 #include "compat.h"
32 #include "ctree.h"
33 #include "extent_map.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "print-tree.h"
37 #include "volumes.h"
38 #include "raid56.h"
39 #include "async-thread.h"
40 #include "check-integrity.h"
41 #include "rcu-string.h"
42 #include "math.h"
43 #include "dev-replace.h"
44
45 static int init_first_rw_device(struct btrfs_trans_handle *trans,
46                                 struct btrfs_root *root,
47                                 struct btrfs_device *device);
48 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
50 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
51 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
52
53 static DEFINE_MUTEX(uuid_mutex);
54 static LIST_HEAD(fs_uuids);
55
56 static void lock_chunks(struct btrfs_root *root)
57 {
58         mutex_lock(&root->fs_info->chunk_mutex);
59 }
60
61 static void unlock_chunks(struct btrfs_root *root)
62 {
63         mutex_unlock(&root->fs_info->chunk_mutex);
64 }
65
66 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
67 {
68         struct btrfs_device *device;
69         WARN_ON(fs_devices->opened);
70         while (!list_empty(&fs_devices->devices)) {
71                 device = list_entry(fs_devices->devices.next,
72                                     struct btrfs_device, dev_list);
73                 list_del(&device->dev_list);
74                 rcu_string_free(device->name);
75                 kfree(device);
76         }
77         kfree(fs_devices);
78 }
79
80 static void btrfs_kobject_uevent(struct block_device *bdev,
81                                  enum kobject_action action)
82 {
83         int ret;
84
85         ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
86         if (ret)
87                 pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
88                         action,
89                         kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
90                         &disk_to_dev(bdev->bd_disk)->kobj);
91 }
92
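/*
 * Free every btrfs_fs_devices entry cached on the global fs_uuids list.
 */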
93 void btrfs_cleanup_fs_uuids(void)
94 {
95         struct btrfs_fs_devices *fs_devices;
96
97         while (!list_empty(&fs_uuids)) {
98                 fs_devices = list_entry(fs_uuids.next,
99                                         struct btrfs_fs_devices, list);
100                 list_del(&fs_devices->list);
101                 free_fs_devices(fs_devices);
102         }
103 }
104
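/*
 * Find a device on @head by devid and, optionally, by uuid.  A NULL @uuid
 * matches on devid alone.
 */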
105 static noinline struct btrfs_device *__find_device(struct list_head *head,
106                                                    u64 devid, u8 *uuid)
107 {
108         struct btrfs_device *dev;
109
110         list_for_each_entry(dev, head, dev_list) {
111                 if (dev->devid == devid &&
112                     (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113                         return dev;
114                 }
115         }
116         return NULL;
117 }
118
119 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120 {
121         struct btrfs_fs_devices *fs_devices;
122
123         list_for_each_entry(fs_devices, &fs_uuids, list) {
124                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
125                         return fs_devices;
126         }
127         return NULL;
128 }
129
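/*
 * Open the block device at @device_path, optionally flush its pagecache,
 * set a 4K blocksize and read the btrfs super block into @bh.  On any
 * failure both @bdev and @bh are reset to NULL.
 */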
130 static int
131 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
132                       int flush, struct block_device **bdev,
133                       struct buffer_head **bh)
134 {
135         int ret;
136
137         *bdev = blkdev_get_by_path(device_path, flags, holder);
138
139         if (IS_ERR(*bdev)) {
140                 ret = PTR_ERR(*bdev);
141                 printk(KERN_INFO "btrfs: open %s failed\n", device_path);
142                 goto error;
143         }
144
145         if (flush)
146                 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
147         ret = set_blocksize(*bdev, 4096);
148         if (ret) {
149                 blkdev_put(*bdev, flags);
150                 goto error;
151         }
152         invalidate_bdev(*bdev);
153         *bh = btrfs_read_dev_super(*bdev);
154         if (!*bh) {
155                 ret = -EINVAL;
156                 blkdev_put(*bdev, flags);
157                 goto error;
158         }
159
160         return 0;
161
162 error:
163         *bdev = NULL;
164         *bh = NULL;
165         return ret;
166 }
167
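/*
 * Put a chain of bios (@head .. @tail) back at the front of the pending
 * list; anything that was already queued stays behind it.
 */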
168 static void requeue_list(struct btrfs_pending_bios *pending_bios,
169                         struct bio *head, struct bio *tail)
170 {
171
172         struct bio *old_head;
173
174         old_head = pending_bios->head;
175         pending_bios->head = head;
176         if (pending_bios->tail)
177                 tail->bi_next = old_head;
178         else
179                 pending_bios->tail = tail;
180 }
181
182 /*
183  * we try to collect pending bios for a device so we don't get a large
184  * number of procs sending bios down to the same device.  This greatly
185  * improves the scheduler's ability to collect and merge the bios.
186  *
187  * But, it also turns into a long list of bios to process and that is sure
188  * to eventually make the worker thread block.  The solution here is to
189  * make some progress and then put this work struct back at the end of
190  * the list if the block device is congested.  This way, multiple devices
191  * can make progress from a single worker thread.
192  */
193 static noinline void run_scheduled_bios(struct btrfs_device *device)
194 {
195         struct bio *pending;
196         struct backing_dev_info *bdi;
197         struct btrfs_fs_info *fs_info;
198         struct btrfs_pending_bios *pending_bios;
199         struct bio *tail;
200         struct bio *cur;
201         int again = 0;
202         unsigned long num_run;
203         unsigned long batch_run = 0;
204         unsigned long limit;
205         unsigned long last_waited = 0;
206         int force_reg = 0;
207         int sync_pending = 0;
208         struct blk_plug plug;
209
210         /*
211          * this function runs all the bios we've collected for
212          * a particular device.  We don't want to wander off to
213          * another device without first sending all of these down.
214  * So, set up a plug here and finish it off before we return
215          */
216         blk_start_plug(&plug);
217
218         bdi = blk_get_backing_dev_info(device->bdev);
219         fs_info = device->dev_root->fs_info;
220         limit = btrfs_async_submit_limit(fs_info);
221         limit = limit * 2 / 3;
222
223 loop:
224         spin_lock(&device->io_lock);
225
226 loop_lock:
227         num_run = 0;
228
229         /* take all the bios off the list at once and process them
230          * later on (without the lock held).  But, remember the
231          * tail and other pointers so the bios can be properly reinserted
232          * into the list if we hit congestion
233          */
234         if (!force_reg && device->pending_sync_bios.head) {
235                 pending_bios = &device->pending_sync_bios;
236                 force_reg = 1;
237         } else {
238                 pending_bios = &device->pending_bios;
239                 force_reg = 0;
240         }
241
242         pending = pending_bios->head;
243         tail = pending_bios->tail;
244         WARN_ON(pending && !tail);
245
246         /*
247          * if pending was null this time around, no bios need processing
248          * at all and we can stop.  Otherwise it'll loop back up again
249          * and do an additional check so no bios are missed.
250          *
251          * device->running_pending is used to synchronize with the
252          * schedule_bio code.
253          */
254         if (device->pending_sync_bios.head == NULL &&
255             device->pending_bios.head == NULL) {
256                 again = 0;
257                 device->running_pending = 0;
258         } else {
259                 again = 1;
260                 device->running_pending = 1;
261         }
262
263         pending_bios->head = NULL;
264         pending_bios->tail = NULL;
265
266         spin_unlock(&device->io_lock);
267
268         while (pending) {
269
270                 rmb();
271                 /* we want to work on both lists, but do more bios on the
272                  * sync list than the regular list
273                  */
274                 if ((num_run > 32 &&
275                     pending_bios != &device->pending_sync_bios &&
276                     device->pending_sync_bios.head) ||
277                    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
278                     device->pending_bios.head)) {
279                         spin_lock(&device->io_lock);
280                         requeue_list(pending_bios, pending, tail);
281                         goto loop_lock;
282                 }
283
284                 cur = pending;
285                 pending = pending->bi_next;
286                 cur->bi_next = NULL;
287
288                 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
289                     waitqueue_active(&fs_info->async_submit_wait))
290                         wake_up(&fs_info->async_submit_wait);
291
292                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
293
294                 /*
295                  * if we're doing the sync list, record that our
296                  * plug has some sync requests on it
297                  *
298                  * If we're doing the regular list and there are
299                  * sync requests sitting around, unplug before
300                  * we add more
301                  */
302                 if (pending_bios == &device->pending_sync_bios) {
303                         sync_pending = 1;
304                 } else if (sync_pending) {
305                         blk_finish_plug(&plug);
306                         blk_start_plug(&plug);
307                         sync_pending = 0;
308                 }
309
310                 btrfsic_submit_bio(cur->bi_rw, cur);
311                 num_run++;
312                 batch_run++;
313                 if (need_resched())
314                         cond_resched();
315
316                 /*
317                  * we made progress, there is more work to do and the bdi
318                  * is now congested.  Back off and let other work structs
319                  * run instead
320                  */
321                 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
322                     fs_info->fs_devices->open_devices > 1) {
323                         struct io_context *ioc;
324
325                         ioc = current->io_context;
326
327                         /*
328                          * the main goal here is that we don't want to
329                          * block if we're going to be able to submit
330                          * more requests without blocking.
331                          *
332                          * This code does two great things: it pokes into
333                          * the elevator code from a filesystem _and_
334                          * it makes assumptions about how batching works.
335                          */
336                         if (ioc && ioc->nr_batch_requests > 0 &&
337                             time_before(jiffies, ioc->last_waited + HZ/50UL) &&
338                             (last_waited == 0 ||
339                              ioc->last_waited == last_waited)) {
340                                 /*
341                                  * we want to go through our batch of
342                                  * requests and stop.  So, we copy out
343                                  * the ioc->last_waited time and test
344                                  * against it before looping
345                                  */
346                                 last_waited = ioc->last_waited;
347                                 if (need_resched())
348                                         cond_resched();
349                                 continue;
350                         }
351                         spin_lock(&device->io_lock);
352                         requeue_list(pending_bios, pending, tail);
353                         device->running_pending = 1;
354
355                         spin_unlock(&device->io_lock);
356                         btrfs_requeue_work(&device->work);
357                         goto done;
358                 }
359                 /* unplug every 64 requests just for good measure */
360                 if (batch_run % 64 == 0) {
361                         blk_finish_plug(&plug);
362                         blk_start_plug(&plug);
363                         sync_pending = 0;
364                 }
365         }
366
367         cond_resched();
368         if (again)
369                 goto loop;
370
371         spin_lock(&device->io_lock);
372         if (device->pending_bios.head || device->pending_sync_bios.head)
373                 goto loop_lock;
374         spin_unlock(&device->io_lock);
375
376 done:
377         blk_finish_plug(&plug);
378 }
379
380 static void pending_bios_fn(struct btrfs_work *work)
381 {
382         struct btrfs_device *device;
383
384         device = container_of(work, struct btrfs_device, work);
385         run_scheduled_bios(device);
386 }
387
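/*
 * Record a scanned device in the in-memory cache: create a new
 * btrfs_fs_devices entry for an unseen fsid, add the device if its devid
 * and uuid are new, or just refresh the stored path and latest generation
 * for a device we already know about.
 */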
388 static noinline int device_list_add(const char *path,
389                            struct btrfs_super_block *disk_super,
390                            u64 devid, struct btrfs_fs_devices **fs_devices_ret)
391 {
392         struct btrfs_device *device;
393         struct btrfs_fs_devices *fs_devices;
394         struct rcu_string *name;
395         u64 found_transid = btrfs_super_generation(disk_super);
396
397         fs_devices = find_fsid(disk_super->fsid);
398         if (!fs_devices) {
399                 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
400                 if (!fs_devices)
401                         return -ENOMEM;
402                 INIT_LIST_HEAD(&fs_devices->devices);
403                 INIT_LIST_HEAD(&fs_devices->alloc_list);
404                 list_add(&fs_devices->list, &fs_uuids);
405                 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
406                 fs_devices->latest_devid = devid;
407                 fs_devices->latest_trans = found_transid;
408                 mutex_init(&fs_devices->device_list_mutex);
409                 device = NULL;
410         } else {
411                 device = __find_device(&fs_devices->devices, devid,
412                                        disk_super->dev_item.uuid);
413         }
414         if (!device) {
415                 if (fs_devices->opened)
416                         return -EBUSY;
417
418                 device = kzalloc(sizeof(*device), GFP_NOFS);
419                 if (!device) {
420                         /* we can safely leave the fs_devices entry around */
421                         return -ENOMEM;
422                 }
423                 device->devid = devid;
424                 device->dev_stats_valid = 0;
425                 device->work.func = pending_bios_fn;
426                 memcpy(device->uuid, disk_super->dev_item.uuid,
427                        BTRFS_UUID_SIZE);
428                 spin_lock_init(&device->io_lock);
429
430                 name = rcu_string_strdup(path, GFP_NOFS);
431                 if (!name) {
432                         kfree(device);
433                         return -ENOMEM;
434                 }
435                 rcu_assign_pointer(device->name, name);
436                 INIT_LIST_HEAD(&device->dev_alloc_list);
437
438                 /* init readahead state */
439                 spin_lock_init(&device->reada_lock);
440                 device->reada_curr_zone = NULL;
441                 atomic_set(&device->reada_in_flight, 0);
442                 device->reada_next = 0;
443                 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
444                 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
445
446                 mutex_lock(&fs_devices->device_list_mutex);
447                 list_add_rcu(&device->dev_list, &fs_devices->devices);
448                 mutex_unlock(&fs_devices->device_list_mutex);
449
450                 device->fs_devices = fs_devices;
451                 fs_devices->num_devices++;
452         } else if (!device->name || strcmp(device->name->str, path)) {
453                 name = rcu_string_strdup(path, GFP_NOFS);
454                 if (!name)
455                         return -ENOMEM;
456                 rcu_string_free(device->name);
457                 rcu_assign_pointer(device->name, name);
458                 if (device->missing) {
459                         fs_devices->missing_devices--;
460                         device->missing = 0;
461                 }
462         }
463
464         if (found_transid > fs_devices->latest_trans) {
465                 fs_devices->latest_devid = devid;
466                 fs_devices->latest_trans = found_transid;
467         }
468         *fs_devices_ret = fs_devices;
469         return 0;
470 }
471
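/*
 * Make an in-memory copy of @orig and all of its devices.  The caller
 * must hold the uuid mutex so the source list cannot change under us.
 */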
472 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
473 {
474         struct btrfs_fs_devices *fs_devices;
475         struct btrfs_device *device;
476         struct btrfs_device *orig_dev;
477
478         fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
479         if (!fs_devices)
480                 return ERR_PTR(-ENOMEM);
481
482         INIT_LIST_HEAD(&fs_devices->devices);
483         INIT_LIST_HEAD(&fs_devices->alloc_list);
484         INIT_LIST_HEAD(&fs_devices->list);
485         mutex_init(&fs_devices->device_list_mutex);
486         fs_devices->latest_devid = orig->latest_devid;
487         fs_devices->latest_trans = orig->latest_trans;
488         fs_devices->total_devices = orig->total_devices;
489         memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
490
491         /* We hold the volume lock, so it is safe to get the devices. */
492         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
493                 struct rcu_string *name;
494
495                 device = kzalloc(sizeof(*device), GFP_NOFS);
496                 if (!device)
497                         goto error;
498
499                 /*
500                  * This is ok to do without the rcu read lock held because we
501                  * hold the uuid mutex, so nothing we touch in here is going to disappear.
502                  */
503                 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
504                 if (!name) {
505                         kfree(device);
506                         goto error;
507                 }
508                 rcu_assign_pointer(device->name, name);
509
510                 device->devid = orig_dev->devid;
511                 device->work.func = pending_bios_fn;
512                 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
513                 spin_lock_init(&device->io_lock);
514                 INIT_LIST_HEAD(&device->dev_list);
515                 INIT_LIST_HEAD(&device->dev_alloc_list);
516
517                 list_add(&device->dev_list, &fs_devices->devices);
518                 device->fs_devices = fs_devices;
519                 fs_devices->num_devices++;
520         }
521         return fs_devices;
522 error:
523         free_fs_devices(fs_devices);
524         return ERR_PTR(-ENOMEM);
525 }
526
527 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
528                                struct btrfs_fs_devices *fs_devices, int step)
529 {
530         struct btrfs_device *device, *next;
531
532         struct block_device *latest_bdev = NULL;
533         u64 latest_devid = 0;
534         u64 latest_transid = 0;
535
536         mutex_lock(&uuid_mutex);
537 again:
538         /* This is the initialized path; it is safe to release the devices. */
539         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
540                 if (device->in_fs_metadata) {
541                         if (!device->is_tgtdev_for_dev_replace &&
542                             (!latest_transid ||
543                              device->generation > latest_transid)) {
544                                 latest_devid = device->devid;
545                                 latest_transid = device->generation;
546                                 latest_bdev = device->bdev;
547                         }
548                         continue;
549                 }
550
551                 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
552                         /*
553                          * In the first step, keep the device which has
554                          * the correct fsid and the devid that is used
555                          * for the dev_replace procedure.
556                          * In the second step, the dev_replace state is
557                          * read from the device tree; then it is known
558                          * whether the procedure is really active, and
559                          * hence whether this device is still in use or
560                          * should be removed.
561                          */
562                         if (step == 0 || device->is_tgtdev_for_dev_replace) {
563                                 continue;
564                         }
565                 }
566                 if (device->bdev) {
567                         blkdev_put(device->bdev, device->mode);
568                         device->bdev = NULL;
569                         fs_devices->open_devices--;
570                 }
571                 if (device->writeable) {
572                         list_del_init(&device->dev_alloc_list);
573                         device->writeable = 0;
574                         if (!device->is_tgtdev_for_dev_replace)
575                                 fs_devices->rw_devices--;
576                 }
577                 list_del_init(&device->dev_list);
578                 fs_devices->num_devices--;
579                 rcu_string_free(device->name);
580                 kfree(device);
581         }
582
583         if (fs_devices->seed) {
584                 fs_devices = fs_devices->seed;
585                 goto again;
586         }
587
588         fs_devices->latest_bdev = latest_bdev;
589         fs_devices->latest_devid = latest_devid;
590         fs_devices->latest_trans = latest_transid;
591
592         mutex_unlock(&uuid_mutex);
593 }
594
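/*
 * Final device teardown, run from a work item (see free_device() below)
 * because blkdev_put() may sleep and RCU callbacks run in atomic context.
 */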
595 static void __free_device(struct work_struct *work)
596 {
597         struct btrfs_device *device;
598
599         device = container_of(work, struct btrfs_device, rcu_work);
600
601         if (device->bdev)
602                 blkdev_put(device->bdev, device->mode);
603
604         rcu_string_free(device->name);
605         kfree(device);
606 }
607
608 static void free_device(struct rcu_head *head)
609 {
610         struct btrfs_device *device;
611
612         device = container_of(head, struct btrfs_device, rcu);
613
614         INIT_WORK(&device->rcu_work, __free_device);
615         schedule_work(&device->rcu_work);
616 }
617
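/*
 * Drop one open reference on @fs_devices.  On the last close, replace
 * each device on the list with a clean copy (no bdev, not writeable) and
 * free the old struct via RCU so lockless readers never see freed memory.
 */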
618 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
619 {
620         struct btrfs_device *device;
621
622         if (--fs_devices->opened > 0)
623                 return 0;
624
625         mutex_lock(&fs_devices->device_list_mutex);
626         list_for_each_entry(device, &fs_devices->devices, dev_list) {
627                 struct btrfs_device *new_device;
628                 struct rcu_string *name;
629
630                 if (device->bdev)
631                         fs_devices->open_devices--;
632
633                 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
634                         list_del_init(&device->dev_alloc_list);
635                         fs_devices->rw_devices--;
636                 }
637
638                 if (device->can_discard)
639                         fs_devices->num_can_discard--;
640
641                 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
642                 BUG_ON(!new_device); /* -ENOMEM */
643                 memcpy(new_device, device, sizeof(*new_device));
644
645                 /* Safe because we are under uuid_mutex */
646                 if (device->name) {
647                         name = rcu_string_strdup(device->name->str, GFP_NOFS);
648                         BUG_ON(device->name && !name); /* -ENOMEM */
649                         rcu_assign_pointer(new_device->name, name);
650                 }
651                 new_device->bdev = NULL;
652                 new_device->writeable = 0;
653                 new_device->in_fs_metadata = 0;
654                 new_device->can_discard = 0;
655                 spin_lock_init(&new_device->io_lock);
656                 list_replace_rcu(&device->dev_list, &new_device->dev_list);
657
658                 call_rcu(&device->rcu, free_device);
659         }
660         mutex_unlock(&fs_devices->device_list_mutex);
661
662         WARN_ON(fs_devices->open_devices);
663         WARN_ON(fs_devices->rw_devices);
664         fs_devices->opened = 0;
665         fs_devices->seeding = 0;
666
667         return 0;
668 }
669
670 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
671 {
672         struct btrfs_fs_devices *seed_devices = NULL;
673         int ret;
674
675         mutex_lock(&uuid_mutex);
676         ret = __btrfs_close_devices(fs_devices);
677         if (!fs_devices->opened) {
678                 seed_devices = fs_devices->seed;
679                 fs_devices->seed = NULL;
680         }
681         mutex_unlock(&uuid_mutex);
682
683         while (seed_devices) {
684                 fs_devices = seed_devices;
685                 seed_devices = fs_devices->seed;
686                 __btrfs_close_devices(fs_devices);
687                 free_fs_devices(fs_devices);
688         }
689         /*
690          * Wait for rcu kworkers under __btrfs_close_devices
691          * to finish all blkdev_puts so the devices are really
692          * freed when umount is done.
693          */
694         rcu_barrier();
695         return ret;
696 }
697
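/*
 * Open every device on the list that we can, verify its super block, and
 * track the device with the highest generation as the latest one.  Fails
 * with -EINVAL only if no device could be opened at all.
 */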
698 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
699                                 fmode_t flags, void *holder)
700 {
701         struct request_queue *q;
702         struct block_device *bdev;
703         struct list_head *head = &fs_devices->devices;
704         struct btrfs_device *device;
705         struct block_device *latest_bdev = NULL;
706         struct buffer_head *bh;
707         struct btrfs_super_block *disk_super;
708         u64 latest_devid = 0;
709         u64 latest_transid = 0;
710         u64 devid;
711         int seeding = 1;
712         int ret = 0;
713
714         flags |= FMODE_EXCL;
715
716         list_for_each_entry(device, head, dev_list) {
717                 if (device->bdev)
718                         continue;
719                 if (!device->name)
720                         continue;
721
722                 /* Just open everything we can; ignore failures here */
723                 if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
724                                             &bdev, &bh))
725                         continue;
726
727                 disk_super = (struct btrfs_super_block *)bh->b_data;
728                 devid = btrfs_stack_device_id(&disk_super->dev_item);
729                 if (devid != device->devid)
730                         goto error_brelse;
731
732                 if (memcmp(device->uuid, disk_super->dev_item.uuid,
733                            BTRFS_UUID_SIZE))
734                         goto error_brelse;
735
736                 device->generation = btrfs_super_generation(disk_super);
737                 if (!latest_transid || device->generation > latest_transid) {
738                         latest_devid = devid;
739                         latest_transid = device->generation;
740                         latest_bdev = bdev;
741                 }
742
743                 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
744                         device->writeable = 0;
745                 } else {
746                         device->writeable = !bdev_read_only(bdev);
747                         seeding = 0;
748                 }
749
750                 q = bdev_get_queue(bdev);
751                 if (blk_queue_discard(q)) {
752                         device->can_discard = 1;
753                         fs_devices->num_can_discard++;
754                 }
755
756                 device->bdev = bdev;
757                 device->in_fs_metadata = 0;
758                 device->mode = flags;
759
760                 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
761                         fs_devices->rotating = 1;
762
763                 fs_devices->open_devices++;
764                 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
765                         fs_devices->rw_devices++;
766                         list_add(&device->dev_alloc_list,
767                                  &fs_devices->alloc_list);
768                 }
769                 brelse(bh);
770                 continue;
771
772 error_brelse:
773                 brelse(bh);
774                 blkdev_put(bdev, flags);
775                 continue;
776         }
777         if (fs_devices->open_devices == 0) {
778                 ret = -EINVAL;
779                 goto out;
780         }
781         fs_devices->seeding = seeding;
782         fs_devices->opened = 1;
783         fs_devices->latest_bdev = latest_bdev;
784         fs_devices->latest_devid = latest_devid;
785         fs_devices->latest_trans = latest_transid;
786         fs_devices->total_rw_bytes = 0;
787 out:
788         return ret;
789 }
790
791 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
792                        fmode_t flags, void *holder)
793 {
794         int ret;
795
796         mutex_lock(&uuid_mutex);
797         if (fs_devices->opened) {
798                 fs_devices->opened++;
799                 ret = 0;
800         } else {
801                 ret = __btrfs_open_devices(fs_devices, flags, holder);
802         }
803         mutex_unlock(&uuid_mutex);
804         return ret;
805 }
806
807 /*
808  * Look for a btrfs signature on a device.  This may be called outside
809  * of the mount path and we are not allowed to call set_blocksize during
810  * the scan.  The superblock is read via the pagecache.
811  */
812 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
813                           struct btrfs_fs_devices **fs_devices_ret)
814 {
815         struct btrfs_super_block *disk_super;
816         struct block_device *bdev;
817         struct page *page;
818         void *p;
819         int ret = -EINVAL;
820         u64 devid;
821         u64 transid;
822         u64 total_devices;
823         u64 bytenr;
824         pgoff_t index;
825
826         /*
827          * we would like to check all the supers, but that would make
828          * a btrfs mount succeed after a mkfs from a different FS.
829          * So, we need to add a special mount option to scan for
830          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
831          */
832         bytenr = btrfs_sb_offset(0);
833         flags |= FMODE_EXCL;
834         mutex_lock(&uuid_mutex);
835
836         bdev = blkdev_get_by_path(path, flags, holder);
837
838         if (IS_ERR(bdev)) {
839                 ret = PTR_ERR(bdev);
840                 goto error;
841         }
842
843         /* make sure our super fits in the device */
844         if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
845                 goto error_bdev_put;
846
847         /* make sure our super fits in the page */
848         if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
849                 goto error_bdev_put;
850
851         /* make sure our super doesn't straddle pages on disk */
852         index = bytenr >> PAGE_CACHE_SHIFT;
853         if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
854                 goto error_bdev_put;
855
856         /* pull in the page with our super */
857         page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
858                                    index, GFP_NOFS);
859
860         if (IS_ERR_OR_NULL(page))
861                 goto error_bdev_put;
862
863         p = kmap(page);
864
865         /* align our pointer to the offset of the super block */
866         disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
867
868         if (btrfs_super_bytenr(disk_super) != bytenr ||
869             btrfs_super_magic(disk_super) != BTRFS_MAGIC)
870                 goto error_unmap;
871
872         devid = btrfs_stack_device_id(&disk_super->dev_item);
873         transid = btrfs_super_generation(disk_super);
874         total_devices = btrfs_super_num_devices(disk_super);
875
876         if (disk_super->label[0]) {
877                 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
878                         disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
879                 printk(KERN_INFO "device label %s ", disk_super->label);
880         } else {
881                 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
882         }
883
884         printk(KERN_CONT "devid %llu transid %llu %s\n",
885                (unsigned long long)devid, (unsigned long long)transid, path);
886
887         ret = device_list_add(path, disk_super, devid, fs_devices_ret);
888         if (!ret && fs_devices_ret)
889                 (*fs_devices_ret)->total_devices = total_devices;
890
891 error_unmap:
892         kunmap(page);
893         page_cache_release(page);
894
895 error_bdev_put:
896         blkdev_put(bdev, flags);
897 error:
898         mutex_unlock(&uuid_mutex);
899         return ret;
900 }
901
902 /* helper to account the used device space in the range [start, end] */
903 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
904                                    u64 end, u64 *length)
905 {
906         struct btrfs_key key;
907         struct btrfs_root *root = device->dev_root;
908         struct btrfs_dev_extent *dev_extent;
909         struct btrfs_path *path;
910         u64 extent_end;
911         int ret;
912         int slot;
913         struct extent_buffer *l;
914
915         *length = 0;
916
917         if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
918                 return 0;
919
920         path = btrfs_alloc_path();
921         if (!path)
922                 return -ENOMEM;
923         path->reada = 2;
924
925         key.objectid = device->devid;
926         key.offset = start;
927         key.type = BTRFS_DEV_EXTENT_KEY;
928
929         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
930         if (ret < 0)
931                 goto out;
932         if (ret > 0) {
933                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
934                 if (ret < 0)
935                         goto out;
936         }
937
938         while (1) {
939                 l = path->nodes[0];
940                 slot = path->slots[0];
941                 if (slot >= btrfs_header_nritems(l)) {
942                         ret = btrfs_next_leaf(root, path);
943                         if (ret == 0)
944                                 continue;
945                         if (ret < 0)
946                                 goto out;
947
948                         break;
949                 }
950                 btrfs_item_key_to_cpu(l, &key, slot);
951
952                 if (key.objectid < device->devid)
953                         goto next;
954
955                 if (key.objectid > device->devid)
956                         break;
957
958                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
959                         goto next;
960
961                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
962                 extent_end = key.offset + btrfs_dev_extent_length(l,
963                                                                   dev_extent);
964                 if (key.offset <= start && extent_end > end) {
965                         *length = end - start + 1;
966                         break;
967                 } else if (key.offset <= start && extent_end > start)
968                         *length += extent_end - start;
969                 else if (key.offset > start && extent_end <= end)
970                         *length += extent_end - key.offset;
971                 else if (key.offset > start && key.offset <= end) {
972                         *length += end - key.offset + 1;
973                         break;
974                 } else if (key.offset > end)
975                         break;
976
977 next:
978                 path->slots[0]++;
979         }
980         ret = 0;
981 out:
982         btrfs_free_path(path);
983         return ret;
984 }
985
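/*
 * Check whether [*start, *start + len) on @device overlaps a chunk that
 * was allocated in this transaction but is not yet committed to the
 * device tree.  If it does, bump *start past the conflicting stripe and
 * return 1.
 */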
986 static int contains_pending_extent(struct btrfs_trans_handle *trans,
987                                    struct btrfs_device *device,
988                                    u64 *start, u64 len)
989 {
990         struct extent_map *em;
991         int ret = 0;
992
993         list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
994                 struct map_lookup *map;
995                 int i;
996
997                 map = (struct map_lookup *)em->bdev;
998                 for (i = 0; i < map->num_stripes; i++) {
999                         if (map->stripes[i].dev != device)
1000                                 continue;
1001                         if (map->stripes[i].physical >= *start + len ||
1002                             map->stripes[i].physical + em->orig_block_len <=
1003                             *start)
1004                                 continue;
1005                         *start = map->stripes[i].physical +
1006                                 em->orig_block_len;
1007                         ret = 1;
1008                 }
1009         }
1010
1011         return ret;
1012 }
1013
1014
1015 /*
1016  * find_free_dev_extent - find free space in the specified device
1017  * @device:     the device in which we search for free space
1018  * @num_bytes:  the size of the free space that we need
1019  * @start:      store the start of the free space
1020  * @len:        the size of the free space that we find, or the size of
1021  *              the max free space if we don't find suitable free space
1022  *
1023  * this uses a pretty simple search; the expectation is that it is
1024  * called very infrequently and that a given device has a small number
1025  * of extents
1026  *
1027  * @start is used to store the start of the free space if we find it. But
1028  * if we don't find suitable free space, it will be used to store the
1029  * start position of the max free space.
1030  *
1031  * @len is used to store the size of the free space that we find.
1032  * But if we don't find suitable free space, it is used to store the size
1033  * of the max free space.
1034  */
1035 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1036                          struct btrfs_device *device, u64 num_bytes,
1037                          u64 *start, u64 *len)
1038 {
1039         struct btrfs_key key;
1040         struct btrfs_root *root = device->dev_root;
1041         struct btrfs_dev_extent *dev_extent;
1042         struct btrfs_path *path;
1043         u64 hole_size;
1044         u64 max_hole_start;
1045         u64 max_hole_size;
1046         u64 extent_end;
1047         u64 search_start;
1048         u64 search_end = device->total_bytes;
1049         int ret;
1050         int slot;
1051         struct extent_buffer *l;
1052
1053         /* FIXME use last free of some kind */
1054
1055         /* we don't want to overwrite the superblock on the drive,
1056          * so we make sure to start at an offset of at least 1MB
1057          */
1058         search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1059
1060         path = btrfs_alloc_path();
1061         if (!path)
1062                 return -ENOMEM;
1063 again:
1064         max_hole_start = search_start;
1065         max_hole_size = 0;
1066         hole_size = 0;
1067
1068         if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1069                 ret = -ENOSPC;
1070                 goto out;
1071         }
1072
1073         path->reada = 2;
1074         path->search_commit_root = 1;
1075         path->skip_locking = 1;
1076
1077         key.objectid = device->devid;
1078         key.offset = search_start;
1079         key.type = BTRFS_DEV_EXTENT_KEY;
1080
1081         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1082         if (ret < 0)
1083                 goto out;
1084         if (ret > 0) {
1085                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1086                 if (ret < 0)
1087                         goto out;
1088         }
1089
1090         while (1) {
1091                 l = path->nodes[0];
1092                 slot = path->slots[0];
1093                 if (slot >= btrfs_header_nritems(l)) {
1094                         ret = btrfs_next_leaf(root, path);
1095                         if (ret == 0)
1096                                 continue;
1097                         if (ret < 0)
1098                                 goto out;
1099
1100                         break;
1101                 }
1102                 btrfs_item_key_to_cpu(l, &key, slot);
1103
1104                 if (key.objectid < device->devid)
1105                         goto next;
1106
1107                 if (key.objectid > device->devid)
1108                         break;
1109
1110                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1111                         goto next;
1112
1113                 if (key.offset > search_start) {
1114                         hole_size = key.offset - search_start;
1115
1116                         /*
1117                          * Have to check before we set max_hole_start, otherwise
1118                          * we could end up sending back this offset anyway.
1119                          */
1120                         if (contains_pending_extent(trans, device,
1121                                                     &search_start,
1122                                                     hole_size))
1123                                 hole_size = 0;
1124
1125                         if (hole_size > max_hole_size) {
1126                                 max_hole_start = search_start;
1127                                 max_hole_size = hole_size;
1128                         }
1129
1130                         /*
1131                          * If this free space is greater than what we need,
1132                          * it must be the max free space that we have found
1133                          * until now, so max_hole_start must point to the start
1134                          * of this free space and the length of this free space
1135                          * is stored in max_hole_size. Thus, we return
1136                          * max_hole_start and max_hole_size and go back to the
1137                          * caller.
1138                          */
1139                         if (hole_size >= num_bytes) {
1140                                 ret = 0;
1141                                 goto out;
1142                         }
1143                 }
1144
1145                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1146                 extent_end = key.offset + btrfs_dev_extent_length(l,
1147                                                                   dev_extent);
1148                 if (extent_end > search_start)
1149                         search_start = extent_end;
1150 next:
1151                 path->slots[0]++;
1152                 cond_resched();
1153         }
1154
1155         /*
1156          * At this point, search_start should be the end of
1157          * allocated dev extents, and when shrinking the device,
1158          * search_end may be smaller than search_start.
1159          */
1160         if (search_end > search_start)
1161                 hole_size = search_end - search_start;
1162
1163         if (hole_size > max_hole_size) {
1164                 max_hole_start = search_start;
1165                 max_hole_size = hole_size;
1166         }
1167
1168         if (contains_pending_extent(trans, device, &search_start, hole_size)) {
1169                 btrfs_release_path(path);
1170                 goto again;
1171         }
1172
1173         /* See above. */
1174         if (hole_size < num_bytes)
1175                 ret = -ENOSPC;
1176         else
1177                 ret = 0;
1178
1179 out:
1180         btrfs_free_path(path);
1181         *start = max_hole_start;
1182         if (len)
1183                 *len = max_hole_size;
1184         return ret;
1185 }
1186
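/*
 * Remove the dev extent item covering @start on @device and return the
 * freed bytes to free_chunk_space.
 */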
1187 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1188                           struct btrfs_device *device,
1189                           u64 start)
1190 {
1191         int ret;
1192         struct btrfs_path *path;
1193         struct btrfs_root *root = device->dev_root;
1194         struct btrfs_key key;
1195         struct btrfs_key found_key;
1196         struct extent_buffer *leaf = NULL;
1197         struct btrfs_dev_extent *extent = NULL;
1198
1199         path = btrfs_alloc_path();
1200         if (!path)
1201                 return -ENOMEM;
1202
1203         key.objectid = device->devid;
1204         key.offset = start;
1205         key.type = BTRFS_DEV_EXTENT_KEY;
1206 again:
1207         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1208         if (ret > 0) {
1209                 ret = btrfs_previous_item(root, path, key.objectid,
1210                                           BTRFS_DEV_EXTENT_KEY);
1211                 if (ret)
1212                         goto out;
1213                 leaf = path->nodes[0];
1214                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1215                 extent = btrfs_item_ptr(leaf, path->slots[0],
1216                                         struct btrfs_dev_extent);
1217                 BUG_ON(found_key.offset > start || found_key.offset +
1218                        btrfs_dev_extent_length(leaf, extent) < start);
1219                 key = found_key;
1220                 btrfs_release_path(path);
1221                 goto again;
1222         } else if (ret == 0) {
1223                 leaf = path->nodes[0];
1224                 extent = btrfs_item_ptr(leaf, path->slots[0],
1225                                         struct btrfs_dev_extent);
1226         } else {
1227                 btrfs_error(root->fs_info, ret, "Slot search failed");
1228                 goto out;
1229         }
1230
1231         if (device->bytes_used > 0) {
1232                 u64 len = btrfs_dev_extent_length(leaf, extent);
1233                 device->bytes_used -= len;
1234                 spin_lock(&root->fs_info->free_chunk_lock);
1235                 root->fs_info->free_chunk_space += len;
1236                 spin_unlock(&root->fs_info->free_chunk_lock);
1237         }
1238         ret = btrfs_del_item(trans, root, path);
1239         if (ret) {
1240                 btrfs_error(root->fs_info, ret,
1241                             "Failed to remove dev extent item");
1242         }
1243 out:
1244         btrfs_free_path(path);
1245         return ret;
1246 }
1247
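/*
 * Insert a dev extent item mapping @num_bytes at @start on @device to the
 * chunk at (@chunk_tree, @chunk_objectid, @chunk_offset).
 */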
1248 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1249                                   struct btrfs_device *device,
1250                                   u64 chunk_tree, u64 chunk_objectid,
1251                                   u64 chunk_offset, u64 start, u64 num_bytes)
1252 {
1253         int ret;
1254         struct btrfs_path *path;
1255         struct btrfs_root *root = device->dev_root;
1256         struct btrfs_dev_extent *extent;
1257         struct extent_buffer *leaf;
1258         struct btrfs_key key;
1259
1260         WARN_ON(!device->in_fs_metadata);
1261         WARN_ON(device->is_tgtdev_for_dev_replace);
1262         path = btrfs_alloc_path();
1263         if (!path)
1264                 return -ENOMEM;
1265
1266         key.objectid = device->devid;
1267         key.offset = start;
1268         key.type = BTRFS_DEV_EXTENT_KEY;
1269         ret = btrfs_insert_empty_item(trans, root, path, &key,
1270                                       sizeof(*extent));
1271         if (ret)
1272                 goto out;
1273
1274         leaf = path->nodes[0];
1275         extent = btrfs_item_ptr(leaf, path->slots[0],
1276                                 struct btrfs_dev_extent);
1277         btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1278         btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1279         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1280
1281         write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1282                     (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1283                     BTRFS_UUID_SIZE);
1284
1285         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1286         btrfs_mark_buffer_dirty(leaf);
1287 out:
1288         btrfs_free_path(path);
1289         return ret;
1290 }
1291
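/*
 * Return the logical offset just past the last mapped chunk, i.e. where
 * the next chunk may be placed.
 */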
1292 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1293 {
1294         struct extent_map_tree *em_tree;
1295         struct extent_map *em;
1296         struct rb_node *n;
1297         u64 ret = 0;
1298
1299         em_tree = &fs_info->mapping_tree.map_tree;
1300         read_lock(&em_tree->lock);
1301         n = rb_last(&em_tree->map);
1302         if (n) {
1303                 em = rb_entry(n, struct extent_map, rb_node);
1304                 ret = em->start + em->len;
1305         }
1306         read_unlock(&em_tree->lock);
1307
1308         return ret;
1309 }
1310
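/*
 * Pick the next devid: one past the highest existing dev item in the
 * chunk tree, or 1 if the tree holds none yet.
 */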
1311 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1312 {
1313         int ret;
1314         struct btrfs_key key;
1315         struct btrfs_key found_key;
1316         struct btrfs_path *path;
1317
1318         root = root->fs_info->chunk_root;
1319
1320         path = btrfs_alloc_path();
1321         if (!path)
1322                 return -ENOMEM;
1323
1324         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1325         key.type = BTRFS_DEV_ITEM_KEY;
1326         key.offset = (u64)-1;
1327
1328         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1329         if (ret < 0)
1330                 goto error;
1331
1332         BUG_ON(ret == 0); /* Corruption */
1333
1334         ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1335                                   BTRFS_DEV_ITEM_KEY);
1336         if (ret) {
1337                 *objectid = 1;
1338         } else {
1339                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1340                                       path->slots[0]);
1341                 *objectid = found_key.offset + 1;
1342         }
1343         ret = 0;
1344 error:
1345         btrfs_free_path(path);
1346         return ret;
1347 }
1348
1349 /*
1350  * the device information is stored in the chunk root
1351  * the btrfs_device struct should be fully filled in
1352  */
1353 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1354                             struct btrfs_root *root,
1355                             struct btrfs_device *device)
1356 {
1357         int ret;
1358         struct btrfs_path *path;
1359         struct btrfs_dev_item *dev_item;
1360         struct extent_buffer *leaf;
1361         struct btrfs_key key;
1362         unsigned long ptr;
1363
1364         root = root->fs_info->chunk_root;
1365
1366         path = btrfs_alloc_path();
1367         if (!path)
1368                 return -ENOMEM;
1369
1370         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1371         key.type = BTRFS_DEV_ITEM_KEY;
1372         key.offset = device->devid;
1373
1374         ret = btrfs_insert_empty_item(trans, root, path, &key,
1375                                       sizeof(*dev_item));
1376         if (ret)
1377                 goto out;
1378
1379         leaf = path->nodes[0];
1380         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1381
1382         btrfs_set_device_id(leaf, dev_item, device->devid);
1383         btrfs_set_device_generation(leaf, dev_item, 0);
1384         btrfs_set_device_type(leaf, dev_item, device->type);
1385         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1386         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1387         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1388         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1389         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1390         btrfs_set_device_group(leaf, dev_item, 0);
1391         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1392         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1393         btrfs_set_device_start_offset(leaf, dev_item, 0);
1394
1395         ptr = (unsigned long)btrfs_device_uuid(dev_item);
1396         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1397         ptr = (unsigned long)btrfs_device_fsid(dev_item);
1398         write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1399         btrfs_mark_buffer_dirty(leaf);
1400
1401         ret = 0;
1402 out:
1403         btrfs_free_path(path);
1404         return ret;
1405 }
1406
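/*
 * Delete the dev item for @device from the chunk tree, in a transaction
 * of its own.
 */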
1407 static int btrfs_rm_dev_item(struct btrfs_root *root,
1408                              struct btrfs_device *device)
1409 {
1410         int ret;
1411         struct btrfs_path *path;
1412         struct btrfs_key key;
1413         struct btrfs_trans_handle *trans;
1414
1415         root = root->fs_info->chunk_root;
1416
1417         path = btrfs_alloc_path();
1418         if (!path)
1419                 return -ENOMEM;
1420
1421         trans = btrfs_start_transaction(root, 0);
1422         if (IS_ERR(trans)) {
1423                 btrfs_free_path(path);
1424                 return PTR_ERR(trans);
1425         }
1426         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1427         key.type = BTRFS_DEV_ITEM_KEY;
1428         key.offset = device->devid;
1429         lock_chunks(root);
1430
1431         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1432         if (ret < 0)
1433                 goto out;
1434
1435         if (ret > 0) {
1436                 ret = -ENOENT;
1437                 goto out;
1438         }
1439
1440         ret = btrfs_del_item(trans, root, path);
1441         if (ret)
1442                 goto out;
1443 out:
1444         btrfs_free_path(path);
1445         unlock_chunks(root);
1446         btrfs_commit_transaction(trans, root);
1447         return ret;
1448 }
1449
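/*
 * Remove the device at @device_path (or a "missing" device) from the
 * filesystem; refuses when that would drop below the minimum device
 * count for the RAID profiles in use.
 */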
1450 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1451 {
1452         struct btrfs_device *device;
1453         struct btrfs_device *next_device;
1454         struct block_device *bdev;
1455         struct buffer_head *bh = NULL;
1456         struct btrfs_super_block *disk_super;
1457         struct btrfs_fs_devices *cur_devices;
1458         u64 all_avail;
1459         u64 devid;
1460         u64 num_devices;
1461         u8 *dev_uuid;
1462         unsigned seq;
1463         int ret = 0;
1464         bool clear_super = false;
1465
1466         mutex_lock(&uuid_mutex);
1467
1468         do {
1469                 seq = read_seqbegin(&root->fs_info->profiles_lock);
1470
1471                 all_avail = root->fs_info->avail_data_alloc_bits |
1472                             root->fs_info->avail_system_alloc_bits |
1473                             root->fs_info->avail_metadata_alloc_bits;
1474         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
1475
1476         num_devices = root->fs_info->fs_devices->num_devices;
1477         btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1478         if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1479                 WARN_ON(num_devices < 1);
1480                 num_devices--;
1481         }
1482         btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1483
1484         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1485                 ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1486                 goto out;
1487         }
1488
1489         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1490                 ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1491                 goto out;
1492         }
1493
1494         if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1495             root->fs_info->fs_devices->rw_devices <= 2) {
1496                 ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1497                 goto out;
1498         }
1499         if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1500             root->fs_info->fs_devices->rw_devices <= 3) {
1501                 ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1502                 goto out;
1503         }
1504
1505         if (strcmp(device_path, "missing") == 0) {
1506                 struct list_head *devices;
1507                 struct btrfs_device *tmp;
1508
1509                 device = NULL;
1510                 devices = &root->fs_info->fs_devices->devices;
1511                 /*
1512                  * It is safe to read the devices since the volume_mutex
1513                  * is held.
1514                  */
1515                 list_for_each_entry(tmp, devices, dev_list) {
1516                         if (tmp->in_fs_metadata &&
1517                             !tmp->is_tgtdev_for_dev_replace &&
1518                             !tmp->bdev) {
1519                                 device = tmp;
1520                                 break;
1521                         }
1522                 }
1523                 bdev = NULL;
1524                 bh = NULL;
1525                 disk_super = NULL;
1526                 if (!device) {
1527                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1528                         goto out;
1529                 }
1530         } else {
1531                 ret = btrfs_get_bdev_and_sb(device_path,
1532                                             FMODE_WRITE | FMODE_EXCL,
1533                                             root->fs_info->bdev_holder, 0,
1534                                             &bdev, &bh);
1535                 if (ret)
1536                         goto out;
1537                 disk_super = (struct btrfs_super_block *)bh->b_data;
1538                 devid = btrfs_stack_device_id(&disk_super->dev_item);
1539                 dev_uuid = disk_super->dev_item.uuid;
1540                 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1541                                            disk_super->fsid);
1542                 if (!device) {
1543                         ret = -ENOENT;
1544                         goto error_brelse;
1545                 }
1546         }
1547
1548         if (device->is_tgtdev_for_dev_replace) {
1549                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1550                 goto error_brelse;
1551         }
1552
1553         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1554                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1555                 goto error_brelse;
1556         }
1557
1558         if (device->writeable) {
1559                 lock_chunks(root);
1560                 list_del_init(&device->dev_alloc_list);
1561                 unlock_chunks(root);
1562                 root->fs_info->fs_devices->rw_devices--;
1563                 clear_super = true;
1564         }
1565
1566         mutex_unlock(&uuid_mutex);
1567         ret = btrfs_shrink_device(device, 0);
1568         mutex_lock(&uuid_mutex);
1569         if (ret)
1570                 goto error_undo;
1571
1572         /*
1573          * TODO: the superblock still includes this device in its num_devices
1574          * counter although write_all_supers() is not locked out. This
1575          * could give a filesystem state which requires a degraded mount.
1576          */
1577         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1578         if (ret)
1579                 goto error_undo;
1580
1581         spin_lock(&root->fs_info->free_chunk_lock);
1582         root->fs_info->free_chunk_space -= device->total_bytes -
1583                 device->bytes_used;
1584         spin_unlock(&root->fs_info->free_chunk_lock);
1585
1586         device->in_fs_metadata = 0;
1587         btrfs_scrub_cancel_dev(root->fs_info, device);
1588
1589         /*
1590          * the device list mutex makes sure that we don't change
1591          * the device list while someone else is writing out all
1592          * the device supers.
1593          */
1594
1595         cur_devices = device->fs_devices;
1596         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1597         list_del_rcu(&device->dev_list);
1598
1599         device->fs_devices->num_devices--;
1600         device->fs_devices->total_devices--;
1601
1602         if (device->missing)
1603                 root->fs_info->fs_devices->missing_devices--;
1604
1605         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1606                                  struct btrfs_device, dev_list);
1607         if (device->bdev == root->fs_info->sb->s_bdev)
1608                 root->fs_info->sb->s_bdev = next_device->bdev;
1609         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1610                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1611
1612         if (device->bdev)
1613                 device->fs_devices->open_devices--;
1614
1615         call_rcu(&device->rcu, free_device);
1616         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1617
1618         num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1619         btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1620
1621         if (cur_devices->open_devices == 0) {
1622                 struct btrfs_fs_devices *fs_devices;
1623                 fs_devices = root->fs_info->fs_devices;
1624                 while (fs_devices) {
1625                         if (fs_devices->seed == cur_devices)
1626                                 break;
1627                         fs_devices = fs_devices->seed;
1628                 }
1629                 fs_devices->seed = cur_devices->seed;
1630                 cur_devices->seed = NULL;
1631                 lock_chunks(root);
1632                 __btrfs_close_devices(cur_devices);
1633                 unlock_chunks(root);
1634                 free_fs_devices(cur_devices);
1635         }
1636
1637         root->fs_info->num_tolerated_disk_barrier_failures =
1638                 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1639
1640         /*
1641          * at this point, the device is zero sized.  We want to
1642          * remove it from the devices list and zero out the old super
1643          */
1644         if (clear_super && disk_super) {
1645                 /* make sure this device isn't detected as part of
1646                  * the FS anymore
1647                  */
1648                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1649                 set_buffer_dirty(bh);
1650                 sync_dirty_buffer(bh);
1651         }
1652
1653         ret = 0;
1654
1655         /* Notify udev that device has changed */
1656         if (bdev)
1657                 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1658
1659 error_brelse:
1660         brelse(bh);
1661         if (bdev)
1662                 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1663 out:
1664         mutex_unlock(&uuid_mutex);
1665         return ret;
1666 error_undo:
1667         if (device->writeable) {
1668                 lock_chunks(root);
1669                 list_add(&device->dev_alloc_list,
1670                          &root->fs_info->fs_devices->alloc_list);
1671                 unlock_chunks(root);
1672                 root->fs_info->fs_devices->rw_devices++;
1673         }
1674         goto error_brelse;
1675 }
1676
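/*
 * Drop the replaced source device of a device replace from the
 * in-memory device list.  The caller must hold device_list_mutex (the
 * WARN_ON below checks for that); the struct itself is freed via RCU.
 */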
1677 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1678                                  struct btrfs_device *srcdev)
1679 {
1680         WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1681         list_del_rcu(&srcdev->dev_list);
1682         list_del_rcu(&srcdev->dev_alloc_list);
1683         fs_info->fs_devices->num_devices--;
1684         if (srcdev->missing) {
1685                 fs_info->fs_devices->missing_devices--;
1686                 fs_info->fs_devices->rw_devices++;
1687         }
1688         if (srcdev->can_discard)
1689                 fs_info->fs_devices->num_can_discard--;
1690         if (srcdev->bdev)
1691                 fs_info->fs_devices->open_devices--;
1692
1693         call_rcu(&srcdev->rcu, free_device);
1694 }
1695
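/*
 * Tear down the target device of a device replace: scratch its
 * superblock so it is not scanned as part of this file system again,
 * fix up the device counters and free the struct via RCU.
 */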
1696 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1697                                       struct btrfs_device *tgtdev)
1698 {
1699         struct btrfs_device *next_device;
1700
1701         WARN_ON(!tgtdev);
1702         mutex_lock(&fs_info->fs_devices->device_list_mutex);
1703         if (tgtdev->bdev) {
1704                 btrfs_scratch_superblock(tgtdev);
1705                 fs_info->fs_devices->open_devices--;
1706         }
1707         fs_info->fs_devices->num_devices--;
1708         if (tgtdev->can_discard)
1709                 fs_info->fs_devices->num_can_discard++;
1710                 fs_info->fs_devices->num_can_discard--;
1711         next_device = list_entry(fs_info->fs_devices->devices.next,
1712                                  struct btrfs_device, dev_list);
1713         if (tgtdev->bdev == fs_info->sb->s_bdev)
1714                 fs_info->sb->s_bdev = next_device->bdev;
1715         if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1716                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1717         list_del_rcu(&tgtdev->dev_list);
1718
1719         call_rcu(&tgtdev->rcu, free_device);
1720
1721         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1722 }
1723
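/*
 * Open the block device at @device_path just long enough to read the
 * devid and UUIDs out of its superblock, then look the device up in the
 * in-memory list.  -ENOENT means the superblock does not belong to any
 * device of this file system.
 */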
1724 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1725                                      struct btrfs_device **device)
1726 {
1727         int ret = 0;
1728         struct btrfs_super_block *disk_super;
1729         u64 devid;
1730         u8 *dev_uuid;
1731         struct block_device *bdev;
1732         struct buffer_head *bh;
1733
1734         *device = NULL;
1735         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1736                                     root->fs_info->bdev_holder, 0, &bdev, &bh);
1737         if (ret)
1738                 return ret;
1739         disk_super = (struct btrfs_super_block *)bh->b_data;
1740         devid = btrfs_stack_device_id(&disk_super->dev_item);
1741         dev_uuid = disk_super->dev_item.uuid;
1742         *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1743                                     disk_super->fsid);
1744         brelse(bh);
1745         if (!*device)
1746                 ret = -ENOENT;
1747         blkdev_put(bdev, FMODE_READ);
1748         return ret;
1749 }
1750
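/*
 * Like btrfs_find_device_by_path(), but the magic value "missing"
 * selects the first device that is present in the metadata yet has no
 * backing bdev, as used when deleting a device that has disappeared.
 */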
1751 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1752                                          char *device_path,
1753                                          struct btrfs_device **device)
1754 {
1755         *device = NULL;
1756         if (strcmp(device_path, "missing") == 0) {
1757                 struct list_head *devices;
1758                 struct btrfs_device *tmp;
1759
1760                 devices = &root->fs_info->fs_devices->devices;
1761                 /*
1762                  * It is safe to read the devices since the volume_mutex
1763                  * is held by the caller.
1764                  */
1765                 list_for_each_entry(tmp, devices, dev_list) {
1766                         if (tmp->in_fs_metadata && !tmp->bdev) {
1767                                 *device = tmp;
1768                                 break;
1769                         }
1770                 }
1771
1772                 if (!*device) {
1773                         pr_err("btrfs: no missing device found\n");
1774                         return -ENOENT;
1775                 }
1776
1777                 return 0;
1778         } else {
1779                 return btrfs_find_device_by_path(root, device_path, device);
1780         }
1781 }
1782
1783 /*
1784  * does all the dirty work required for changing the file system's UUID.
1785  */
1786 static int btrfs_prepare_sprout(struct btrfs_root *root)
1787 {
1788         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1789         struct btrfs_fs_devices *old_devices;
1790         struct btrfs_fs_devices *seed_devices;
1791         struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1792         struct btrfs_device *device;
1793         u64 super_flags;
1794
1795         BUG_ON(!mutex_is_locked(&uuid_mutex));
1796         if (!fs_devices->seeding)
1797                 return -EINVAL;
1798
1799         seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1800         if (!seed_devices)
1801                 return -ENOMEM;
1802
1803         old_devices = clone_fs_devices(fs_devices);
1804         if (IS_ERR(old_devices)) {
1805                 kfree(seed_devices);
1806                 return PTR_ERR(old_devices);
1807         }
1808
1809         list_add(&old_devices->list, &fs_uuids);
1810
1811         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1812         seed_devices->opened = 1;
1813         INIT_LIST_HEAD(&seed_devices->devices);
1814         INIT_LIST_HEAD(&seed_devices->alloc_list);
1815         mutex_init(&seed_devices->device_list_mutex);
1816
1817         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1818         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1819                               synchronize_rcu);
1820         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1821
1822         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1823         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1824                 device->fs_devices = seed_devices;
1825         }
1826
1827         fs_devices->seeding = 0;
1828         fs_devices->num_devices = 0;
1829         fs_devices->open_devices = 0;
1830         fs_devices->total_devices = 0;
1831         fs_devices->seed = seed_devices;
1832
1833         generate_random_uuid(fs_devices->fsid);
1834         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1835         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1836         super_flags = btrfs_super_flags(disk_super) &
1837                       ~BTRFS_SUPER_FLAG_SEEDING;
1838         btrfs_set_super_flags(disk_super, super_flags);
1839
1840         return 0;
1841 }
1842
1843 /*
1844  * store the expected generation for seed devices in device items.
1845  */
1846 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1847                                struct btrfs_root *root)
1848 {
1849         struct btrfs_path *path;
1850         struct extent_buffer *leaf;
1851         struct btrfs_dev_item *dev_item;
1852         struct btrfs_device *device;
1853         struct btrfs_key key;
1854         u8 fs_uuid[BTRFS_UUID_SIZE];
1855         u8 dev_uuid[BTRFS_UUID_SIZE];
1856         u64 devid;
1857         int ret;
1858
1859         path = btrfs_alloc_path();
1860         if (!path)
1861                 return -ENOMEM;
1862
1863         root = root->fs_info->chunk_root;
1864         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1865         key.offset = 0;
1866         key.type = BTRFS_DEV_ITEM_KEY;
1867
1868         while (1) {
1869                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1870                 if (ret < 0)
1871                         goto error;
1872
1873                 leaf = path->nodes[0];
1874 next_slot:
1875                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1876                         ret = btrfs_next_leaf(root, path);
1877                         if (ret > 0)
1878                                 break;
1879                         if (ret < 0)
1880                                 goto error;
1881                         leaf = path->nodes[0];
1882                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1883                         btrfs_release_path(path);
1884                         continue;
1885                 }
1886
1887                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1888                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1889                     key.type != BTRFS_DEV_ITEM_KEY)
1890                         break;
1891
1892                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1893                                           struct btrfs_dev_item);
1894                 devid = btrfs_device_id(leaf, dev_item);
1895                 read_extent_buffer(leaf, dev_uuid,
1896                                    (unsigned long)btrfs_device_uuid(dev_item),
1897                                    BTRFS_UUID_SIZE);
1898                 read_extent_buffer(leaf, fs_uuid,
1899                                    (unsigned long)btrfs_device_fsid(dev_item),
1900                                    BTRFS_UUID_SIZE);
1901                 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1902                                            fs_uuid);
1903                 BUG_ON(!device); /* Logic error */
1904
1905                 if (device->fs_devices->seeding) {
1906                         btrfs_set_device_generation(leaf, dev_item,
1907                                                     device->generation);
1908                         btrfs_mark_buffer_dirty(leaf);
1909                 }
1910
1911                 path->slots[0]++;
1912                 goto next_slot;
1913         }
1914         ret = 0;
1915 error:
1916         btrfs_free_path(path);
1917         return ret;
1918 }
1919
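/*
 * Add a new device to a mounted file system.  The device is opened
 * exclusively, rejected with -EEXIST if it already is a member, and
 * initialized with a fresh devid and UUID.  On a seeding file system
 * the sprout is prepared first (a new fsid is generated) and the system
 * chunks are relocated once the transaction commits.
 */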
1920 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1921 {
1922         struct request_queue *q;
1923         struct btrfs_trans_handle *trans;
1924         struct btrfs_device *device;
1925         struct block_device *bdev;
1926         struct list_head *devices;
1927         struct super_block *sb = root->fs_info->sb;
1928         struct rcu_string *name;
1929         u64 total_bytes;
1930         int seeding_dev = 0;
1931         int ret = 0;
1932
1933         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1934                 return -EROFS;
1935
1936         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1937                                   root->fs_info->bdev_holder);
1938         if (IS_ERR(bdev))
1939                 return PTR_ERR(bdev);
1940
1941         if (root->fs_info->fs_devices->seeding) {
1942                 seeding_dev = 1;
1943                 down_write(&sb->s_umount);
1944                 mutex_lock(&uuid_mutex);
1945         }
1946
1947         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1948
1949         devices = &root->fs_info->fs_devices->devices;
1950
1951         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1952         list_for_each_entry(device, devices, dev_list) {
1953                 if (device->bdev == bdev) {
1954                         ret = -EEXIST;
1955                         mutex_unlock(
1956                                 &root->fs_info->fs_devices->device_list_mutex);
1957                         goto error;
1958                 }
1959         }
1960         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1961
1962         device = kzalloc(sizeof(*device), GFP_NOFS);
1963         if (!device) {
1964                 /* we can safely leave the fs_devices entry around */
1965                 ret = -ENOMEM;
1966                 goto error;
1967         }
1968
1969         name = rcu_string_strdup(device_path, GFP_NOFS);
1970         if (!name) {
1971                 kfree(device);
1972                 ret = -ENOMEM;
1973                 goto error;
1974         }
1975         rcu_assign_pointer(device->name, name);
1976
1977         ret = find_next_devid(root, &device->devid);
1978         if (ret) {
1979                 rcu_string_free(device->name);
1980                 kfree(device);
1981                 goto error;
1982         }
1983
1984         trans = btrfs_start_transaction(root, 0);
1985         if (IS_ERR(trans)) {
1986                 rcu_string_free(device->name);
1987                 kfree(device);
1988                 ret = PTR_ERR(trans);
1989                 goto error;
1990         }
1991
1992         lock_chunks(root);
1993
1994         q = bdev_get_queue(bdev);
1995         if (blk_queue_discard(q))
1996                 device->can_discard = 1;
1997         device->writeable = 1;
1998         device->work.func = pending_bios_fn;
1999         generate_random_uuid(device->uuid);
2000         spin_lock_init(&device->io_lock);
2001         device->generation = trans->transid;
2002         device->io_width = root->sectorsize;
2003         device->io_align = root->sectorsize;
2004         device->sector_size = root->sectorsize;
2005         device->total_bytes = i_size_read(bdev->bd_inode);
2006         device->disk_total_bytes = device->total_bytes;
2007         device->dev_root = root->fs_info->dev_root;
2008         device->bdev = bdev;
2009         device->in_fs_metadata = 1;
2010         device->is_tgtdev_for_dev_replace = 0;
2011         device->mode = FMODE_EXCL;
2012         set_blocksize(device->bdev, 4096);
2013
2014         if (seeding_dev) {
2015                 sb->s_flags &= ~MS_RDONLY;
2016                 ret = btrfs_prepare_sprout(root);
2017                 BUG_ON(ret); /* -ENOMEM */
2018         }
2019
2020         device->fs_devices = root->fs_info->fs_devices;
2021
2022         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2023         list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2024         list_add(&device->dev_alloc_list,
2025                  &root->fs_info->fs_devices->alloc_list);
2026         root->fs_info->fs_devices->num_devices++;
2027         root->fs_info->fs_devices->open_devices++;
2028         root->fs_info->fs_devices->rw_devices++;
2029         root->fs_info->fs_devices->total_devices++;
2030         if (device->can_discard)
2031                 root->fs_info->fs_devices->num_can_discard++;
2032         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2033
2034         spin_lock(&root->fs_info->free_chunk_lock);
2035         root->fs_info->free_chunk_space += device->total_bytes;
2036         spin_unlock(&root->fs_info->free_chunk_lock);
2037
2038         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2039                 root->fs_info->fs_devices->rotating = 1;
2040
2041         total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2042         btrfs_set_super_total_bytes(root->fs_info->super_copy,
2043                                     total_bytes + device->total_bytes);
2044
2045         total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2046         btrfs_set_super_num_devices(root->fs_info->super_copy,
2047                                     total_bytes + 1);
2048         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2049
2050         if (seeding_dev) {
2051                 ret = init_first_rw_device(trans, root, device);
2052                 if (ret) {
2053                         btrfs_abort_transaction(trans, root, ret);
2054                         goto error_trans;
2055                 }
2056                 ret = btrfs_finish_sprout(trans, root);
2057                 if (ret) {
2058                         btrfs_abort_transaction(trans, root, ret);
2059                         goto error_trans;
2060                 }
2061         } else {
2062                 ret = btrfs_add_device(trans, root, device);
2063                 if (ret) {
2064                         btrfs_abort_transaction(trans, root, ret);
2065                         goto error_trans;
2066                 }
2067         }
2068
2069         /*
2070          * we've got more storage, clear any full flags on the space
2071          * infos
2072          */
2073         btrfs_clear_space_info_full(root->fs_info);
2074
2075         unlock_chunks(root);
2076         root->fs_info->num_tolerated_disk_barrier_failures =
2077                 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2078         ret = btrfs_commit_transaction(trans, root);
2079
2080         if (seeding_dev) {
2081                 mutex_unlock(&uuid_mutex);
2082                 up_write(&sb->s_umount);
2083
2084                 if (ret) /* transaction commit */
2085                         return ret;
2086
2087                 ret = btrfs_relocate_sys_chunks(root);
2088                 if (ret < 0)
2089                         btrfs_error(root->fs_info, ret,
2090                                     "Failed to relocate sys chunks after "
2091                                     "device initialization. This can be fixed "
2092                                     "using the \"btrfs balance\" command.");
2093                 trans = btrfs_attach_transaction(root);
2094                 if (IS_ERR(trans)) {
2095                         if (PTR_ERR(trans) == -ENOENT)
2096                                 return 0;
2097                         return PTR_ERR(trans);
2098                 }
2099                 ret = btrfs_commit_transaction(trans, root);
2100         }
2101
2102         return ret;
2103
2104 error_trans:
2105         unlock_chunks(root);
2106         btrfs_end_transaction(trans, root);
2107         rcu_string_free(device->name);
2108         kfree(device);
2109 error:
2110         blkdev_put(bdev, FMODE_EXCL);
2111         if (seeding_dev) {
2112                 mutex_unlock(&uuid_mutex);
2113                 up_write(&sb->s_umount);
2114         }
2115         return ret;
2116 }
2117
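/*
 * Prepare the target device for a device replace.  This mirrors
 * btrfs_init_new_device() except that the device gets the reserved
 * devid BTRFS_DEV_REPLACE_DEVID, is never added to the allocation list
 * and no DEV_ITEM is inserted; the replace code takes over from here.
 */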
2118 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2119                                   struct btrfs_device **device_out)
2120 {
2121         struct request_queue *q;
2122         struct btrfs_device *device;
2123         struct block_device *bdev;
2124         struct btrfs_fs_info *fs_info = root->fs_info;
2125         struct list_head *devices;
2126         struct rcu_string *name;
2127         int ret = 0;
2128
2129         *device_out = NULL;
2130         if (fs_info->fs_devices->seeding)
2131                 return -EINVAL;
2132
2133         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2134                                   fs_info->bdev_holder);
2135         if (IS_ERR(bdev))
2136                 return PTR_ERR(bdev);
2137
2138         filemap_write_and_wait(bdev->bd_inode->i_mapping);
2139
2140         devices = &fs_info->fs_devices->devices;
2141         list_for_each_entry(device, devices, dev_list) {
2142                 if (device->bdev == bdev) {
2143                         ret = -EEXIST;
2144                         goto error;
2145                 }
2146         }
2147
2148         device = kzalloc(sizeof(*device), GFP_NOFS);
2149         if (!device) {
2150                 ret = -ENOMEM;
2151                 goto error;
2152         }
2153
2154         name = rcu_string_strdup(device_path, GFP_NOFS);
2155         if (!name) {
2156                 kfree(device);
2157                 ret = -ENOMEM;
2158                 goto error;
2159         }
2160         rcu_assign_pointer(device->name, name);
2161
2162         q = bdev_get_queue(bdev);
2163         if (blk_queue_discard(q))
2164                 device->can_discard = 1;
2165         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2166         device->writeable = 1;
2167         device->work.func = pending_bios_fn;
2168         generate_random_uuid(device->uuid);
2169         device->devid = BTRFS_DEV_REPLACE_DEVID;
2170         spin_lock_init(&device->io_lock);
2171         device->generation = 0;
2172         device->io_width = root->sectorsize;
2173         device->io_align = root->sectorsize;
2174         device->sector_size = root->sectorsize;
2175         device->total_bytes = i_size_read(bdev->bd_inode);
2176         device->disk_total_bytes = device->total_bytes;
2177         device->dev_root = fs_info->dev_root;
2178         device->bdev = bdev;
2179         device->in_fs_metadata = 1;
2180         device->is_tgtdev_for_dev_replace = 1;
2181         device->mode = FMODE_EXCL;
2182         set_blocksize(device->bdev, 4096);
2183         device->fs_devices = fs_info->fs_devices;
2184         list_add(&device->dev_list, &fs_info->fs_devices->devices);
2185         fs_info->fs_devices->num_devices++;
2186         fs_info->fs_devices->open_devices++;
2187         if (device->can_discard)
2188                 fs_info->fs_devices->num_can_discard++;
2189         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2190
2191         *device_out = device;
2192         return ret;
2193
2194 error:
2195         blkdev_put(bdev, FMODE_EXCL);
2196         return ret;
2197 }
2198
2199 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2200                                               struct btrfs_device *tgtdev)
2201 {
2202         WARN_ON(fs_info->fs_devices->rw_devices == 0);
2203         tgtdev->io_width = fs_info->dev_root->sectorsize;
2204         tgtdev->io_align = fs_info->dev_root->sectorsize;
2205         tgtdev->sector_size = fs_info->dev_root->sectorsize;
2206         tgtdev->dev_root = fs_info->dev_root;
2207         tgtdev->in_fs_metadata = 1;
2208 }
2209
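/*
 * Write the current in-memory state of @device back into its DEV_ITEM.
 * Note that the item's total_bytes field is filled from
 * disk_total_bytes, which can differ from the in-memory total_bytes
 * while a resize is in flight.
 */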
2210 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2211                                         struct btrfs_device *device)
2212 {
2213         int ret;
2214         struct btrfs_path *path;
2215         struct btrfs_root *root;
2216         struct btrfs_dev_item *dev_item;
2217         struct extent_buffer *leaf;
2218         struct btrfs_key key;
2219
2220         root = device->dev_root->fs_info->chunk_root;
2221
2222         path = btrfs_alloc_path();
2223         if (!path)
2224                 return -ENOMEM;
2225
2226         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2227         key.type = BTRFS_DEV_ITEM_KEY;
2228         key.offset = device->devid;
2229
2230         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2231         if (ret < 0)
2232                 goto out;
2233
2234         if (ret > 0) {
2235                 ret = -ENOENT;
2236                 goto out;
2237         }
2238
2239         leaf = path->nodes[0];
2240         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2241
2242         btrfs_set_device_id(leaf, dev_item, device->devid);
2243         btrfs_set_device_type(leaf, dev_item, device->type);
2244         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2245         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2246         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2247         btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2248         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2249         btrfs_mark_buffer_dirty(leaf);
2250
2251 out:
2252         btrfs_free_path(path);
2253         return ret;
2254 }
2255
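/*
 * Grow @device to @new_size: bump the super block's total_bytes, the
 * rw byte counter and the per-device sizes, then persist the DEV_ITEM.
 * Only growing is supported here (-EINVAL otherwise) and replace
 * targets are refused.  btrfs_grow_device() below is the wrapper that
 * takes the chunk mutex.
 */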
2256 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2257                       struct btrfs_device *device, u64 new_size)
2258 {
2259         struct btrfs_super_block *super_copy =
2260                 device->dev_root->fs_info->super_copy;
2261         u64 old_total = btrfs_super_total_bytes(super_copy);
2262         u64 diff = new_size - device->total_bytes;
2263
2264         if (!device->writeable)
2265                 return -EACCES;
2266         if (new_size <= device->total_bytes ||
2267             device->is_tgtdev_for_dev_replace)
2268                 return -EINVAL;
2269
2270         btrfs_set_super_total_bytes(super_copy, old_total + diff);
2271         device->fs_devices->total_rw_bytes += diff;
2272
2273         device->total_bytes = new_size;
2274         device->disk_total_bytes = new_size;
2275         btrfs_clear_space_info_full(device->dev_root->fs_info);
2276
2277         return btrfs_update_device(trans, device);
2278 }
2279
2280 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2281                       struct btrfs_device *device, u64 new_size)
2282 {
2283         int ret;
2284         lock_chunks(device->dev_root);
2285         ret = __btrfs_grow_device(trans, device, new_size);
2286         unlock_chunks(device->dev_root);
2287         return ret;
2288 }
2289
2290 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2291                             struct btrfs_root *root,
2292                             u64 chunk_tree, u64 chunk_objectid,
2293                             u64 chunk_offset)
2294 {
2295         int ret;
2296         struct btrfs_path *path;
2297         struct btrfs_key key;
2298
2299         root = root->fs_info->chunk_root;
2300         path = btrfs_alloc_path();
2301         if (!path)
2302                 return -ENOMEM;
2303
2304         key.objectid = chunk_objectid;
2305         key.offset = chunk_offset;
2306         key.type = BTRFS_CHUNK_ITEM_KEY;
2307
2308         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2309         if (ret < 0)
2310                 goto out;
2311         else if (ret > 0) { /* Logic error or corruption */
2312                 btrfs_error(root->fs_info, -ENOENT,
2313                             "Failed lookup while freeing chunk.");
2314                 ret = -ENOENT;
2315                 goto out;
2316         }
2317
2318         ret = btrfs_del_item(trans, root, path);
2319         if (ret < 0)
2320                 btrfs_error(root->fs_info, ret,
2321                             "Failed to delete chunk item.");
2322 out:
2323         btrfs_free_path(path);
2324         return ret;
2325 }
2326
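/*
 * Remove one chunk description from the superblock's sys_chunk_array.
 * The array is a packed sequence of
 *
 *	(struct btrfs_disk_key)(struct btrfs_chunk + stripes) ...
 *
 * entries, so the matching entry is cut out by memmove()ing the tail
 * down and shrinking sys_array_size.  A key of any other type in the
 * array means corruption and yields -EIO.
 */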
2327 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2328                         chunk_offset)
2329 {
2330         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2331         struct btrfs_disk_key *disk_key;
2332         struct btrfs_chunk *chunk;
2333         u8 *ptr;
2334         int ret = 0;
2335         u32 num_stripes;
2336         u32 array_size;
2337         u32 len = 0;
2338         u32 cur;
2339         struct btrfs_key key;
2340
2341         array_size = btrfs_super_sys_array_size(super_copy);
2342
2343         ptr = super_copy->sys_chunk_array;
2344         cur = 0;
2345
2346         while (cur < array_size) {
2347                 disk_key = (struct btrfs_disk_key *)ptr;
2348                 btrfs_disk_key_to_cpu(&key, disk_key);
2349
2350                 len = sizeof(*disk_key);
2351
2352                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2353                         chunk = (struct btrfs_chunk *)(ptr + len);
2354                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2355                         len += btrfs_chunk_item_size(num_stripes);
2356                 } else {
2357                         ret = -EIO;
2358                         break;
2359                 }
2360                 if (key.objectid == chunk_objectid &&
2361                     key.offset == chunk_offset) {
2362                         memmove(ptr, ptr + len, array_size - (cur + len));
2363                         array_size -= len;
2364                         btrfs_set_super_sys_array_size(super_copy, array_size);
2365                 } else {
2366                         ptr += len;
2367                         cur += len;
2368                 }
2369         }
2370         return ret;
2371 }
2372
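/*
 * Relocate everything that lives in the chunk at @chunk_offset and then
 * delete the chunk itself: the device extents, the chunk tree item, the
 * sys_chunk_array entry for SYSTEM chunks, the block group and finally
 * the extent mapping.  Returns -ENOSPC if there is no room to relocate
 * the contents to.
 */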
2373 static int btrfs_relocate_chunk(struct btrfs_root *root,
2374                          u64 chunk_tree, u64 chunk_objectid,
2375                          u64 chunk_offset)
2376 {
2377         struct extent_map_tree *em_tree;
2378         struct btrfs_root *extent_root;
2379         struct btrfs_trans_handle *trans;
2380         struct extent_map *em;
2381         struct map_lookup *map;
2382         int ret;
2383         int i;
2384
2385         root = root->fs_info->chunk_root;
2386         extent_root = root->fs_info->extent_root;
2387         em_tree = &root->fs_info->mapping_tree.map_tree;
2388
2389         ret = btrfs_can_relocate(extent_root, chunk_offset);
2390         if (ret)
2391                 return -ENOSPC;
2392
2393         /* step one, relocate all the extents inside this chunk */
2394         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2395         if (ret)
2396                 return ret;
2397
2398         trans = btrfs_start_transaction(root, 0);
2399         if (IS_ERR(trans)) {
2400                 ret = PTR_ERR(trans);
2401                 btrfs_std_error(root->fs_info, ret);
2402                 return ret;
2403         }
2404
2405         lock_chunks(root);
2406
2407         /*
2408          * step two, delete the device extents and the
2409          * chunk tree entries
2410          */
2411         read_lock(&em_tree->lock);
2412         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2413         read_unlock(&em_tree->lock);
2414
2415         BUG_ON(!em || em->start > chunk_offset ||
2416                em->start + em->len < chunk_offset);
2417         map = (struct map_lookup *)em->bdev;
2418
2419         for (i = 0; i < map->num_stripes; i++) {
2420                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2421                                             map->stripes[i].physical);
2422                 BUG_ON(ret);
2423
2424                 if (map->stripes[i].dev) {
2425                         ret = btrfs_update_device(trans, map->stripes[i].dev);
2426                         BUG_ON(ret);
2427                 }
2428         }
2429         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2430                                chunk_offset);
2431
2432         BUG_ON(ret);
2433
2434         trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2435
2436         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2437                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2438                 BUG_ON(ret);
2439         }
2440
2441         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2442         BUG_ON(ret);
2443
2444         write_lock(&em_tree->lock);
2445         remove_extent_mapping(em_tree, em);
2446         write_unlock(&em_tree->lock);
2447
2448         kfree(map);
2449         em->bdev = NULL;
2450
2451         /* once for the tree */
2452         free_extent_map(em);
2453         /* once for us */
2454         free_extent_map(em);
2455
2456         unlock_chunks(root);
2457         btrfs_end_transaction(trans, root);
2458         return 0;
2459 }
2460
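/*
 * Walk the chunk tree from the highest offset downwards and relocate
 * every SYSTEM chunk.  Chunks that fail with -ENOSPC are retried once
 * in a second pass; if any still fail after that, -ENOSPC is returned.
 */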
2461 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2462 {
2463         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2464         struct btrfs_path *path;
2465         struct extent_buffer *leaf;
2466         struct btrfs_chunk *chunk;
2467         struct btrfs_key key;
2468         struct btrfs_key found_key;
2469         u64 chunk_tree = chunk_root->root_key.objectid;
2470         u64 chunk_type;
2471         bool retried = false;
2472         int failed = 0;
2473         int ret;
2474
2475         path = btrfs_alloc_path();
2476         if (!path)
2477                 return -ENOMEM;
2478
2479 again:
2480         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2481         key.offset = (u64)-1;
2482         key.type = BTRFS_CHUNK_ITEM_KEY;
2483
2484         while (1) {
2485                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2486                 if (ret < 0)
2487                         goto error;
2488                 BUG_ON(ret == 0); /* Corruption */
2489
2490                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2491                                           key.type);
2492                 if (ret < 0)
2493                         goto error;
2494                 if (ret > 0)
2495                         break;
2496
2497                 leaf = path->nodes[0];
2498                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2499
2500                 chunk = btrfs_item_ptr(leaf, path->slots[0],
2501                                        struct btrfs_chunk);
2502                 chunk_type = btrfs_chunk_type(leaf, chunk);
2503                 btrfs_release_path(path);
2504
2505                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2506                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2507                                                    found_key.objectid,
2508                                                    found_key.offset);
2509                         if (ret == -ENOSPC)
2510                                 failed++;
2511                         else if (ret)
2512                                 BUG();
2513                 }
2514
2515                 if (found_key.offset == 0)
2516                         break;
2517                 key.offset = found_key.offset - 1;
2518         }
2519         ret = 0;
2520         if (failed && !retried) {
2521                 failed = 0;
2522                 retried = true;
2523                 goto again;
2524         } else if (failed && retried) {
2525                 WARN_ON(1);
2526                 ret = -ENOSPC;
2527         }
2528 error:
2529         btrfs_free_path(path);
2530         return ret;
2531 }
2532
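/*
 * Persist the balance arguments as the single BALANCE_ITEM so that an
 * interrupted balance can be resumed after a crash or unmount.
 * del_balance_item() below drops the item again once balance completes
 * or is canceled.
 */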
2533 static int insert_balance_item(struct btrfs_root *root,
2534                                struct btrfs_balance_control *bctl)
2535 {
2536         struct btrfs_trans_handle *trans;
2537         struct btrfs_balance_item *item;
2538         struct btrfs_disk_balance_args disk_bargs;
2539         struct btrfs_path *path;
2540         struct extent_buffer *leaf;
2541         struct btrfs_key key;
2542         int ret, err;
2543
2544         path = btrfs_alloc_path();
2545         if (!path)
2546                 return -ENOMEM;
2547
2548         trans = btrfs_start_transaction(root, 0);
2549         if (IS_ERR(trans)) {
2550                 btrfs_free_path(path);
2551                 return PTR_ERR(trans);
2552         }
2553
2554         key.objectid = BTRFS_BALANCE_OBJECTID;
2555         key.type = BTRFS_BALANCE_ITEM_KEY;
2556         key.offset = 0;
2557
2558         ret = btrfs_insert_empty_item(trans, root, path, &key,
2559                                       sizeof(*item));
2560         if (ret)
2561                 goto out;
2562
2563         leaf = path->nodes[0];
2564         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2565
2566         memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2567
2568         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2569         btrfs_set_balance_data(leaf, item, &disk_bargs);
2570         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2571         btrfs_set_balance_meta(leaf, item, &disk_bargs);
2572         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2573         btrfs_set_balance_sys(leaf, item, &disk_bargs);
2574
2575         btrfs_set_balance_flags(leaf, item, bctl->flags);
2576
2577         btrfs_mark_buffer_dirty(leaf);
2578 out:
2579         btrfs_free_path(path);
2580         err = btrfs_commit_transaction(trans, root);
2581         if (err && !ret)
2582                 ret = err;
2583         return ret;
2584 }
2585
2586 static int del_balance_item(struct btrfs_root *root)
2587 {
2588         struct btrfs_trans_handle *trans;
2589         struct btrfs_path *path;
2590         struct btrfs_key key;
2591         int ret, err;
2592
2593         path = btrfs_alloc_path();
2594         if (!path)
2595                 return -ENOMEM;
2596
2597         trans = btrfs_start_transaction(root, 0);
2598         if (IS_ERR(trans)) {
2599                 btrfs_free_path(path);
2600                 return PTR_ERR(trans);
2601         }
2602
2603         key.objectid = BTRFS_BALANCE_OBJECTID;
2604         key.type = BTRFS_BALANCE_ITEM_KEY;
2605         key.offset = 0;
2606
2607         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2608         if (ret < 0)
2609                 goto out;
2610         if (ret > 0) {
2611                 ret = -ENOENT;
2612                 goto out;
2613         }
2614
2615         ret = btrfs_del_item(trans, root, path);
2616 out:
2617         btrfs_free_path(path);
2618         err = btrfs_commit_transaction(trans, root);
2619         if (err && !ret)
2620                 ret = err;
2621         return ret;
2622 }
2623
2624 /*
2625  * This is a heuristic used to reduce the number of chunks balanced on
2626  * resume after balance was interrupted.
2627  */
2628 static void update_balance_args(struct btrfs_balance_control *bctl)
2629 {
2630         /*
2631          * Turn on soft mode for chunk types that were being converted.
2632          */
2633         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2634                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2635         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2636                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2637         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2638                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2639
2640         /*
2641          * Turn on usage filter if it is not already used.  The idea is
2642          * that chunks that we have already balanced should be
2643          * reasonably full.  Don't do it for chunks that are being
2644          * converted - that will keep us from relocating unconverted
2645          * (albeit full) chunks.
2646          */
2647         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2648             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2649                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2650                 bctl->data.usage = 90;
2651         }
2652         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2653             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2654                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2655                 bctl->sys.usage = 90;
2656         }
2657         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2658             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2659                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2660                 bctl->meta.usage = 90;
2661         }
2662 }
2663
2664 /*
2665  * Should be called with both balance and volume mutexes held to
2666  * serialize other volume operations (add_dev/rm_dev/resize) with
2667  * restriper.  Same goes for unset_balance_control.
2668  */
2669 static void set_balance_control(struct btrfs_balance_control *bctl)
2670 {
2671         struct btrfs_fs_info *fs_info = bctl->fs_info;
2672
2673         BUG_ON(fs_info->balance_ctl);
2674
2675         spin_lock(&fs_info->balance_lock);
2676         fs_info->balance_ctl = bctl;
2677         spin_unlock(&fs_info->balance_lock);
2678 }
2679
2680 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2681 {
2682         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2683
2684         BUG_ON(!fs_info->balance_ctl);
2685
2686         spin_lock(&fs_info->balance_lock);
2687         fs_info->balance_ctl = NULL;
2688         spin_unlock(&fs_info->balance_lock);
2689
2690         kfree(bctl);
2691 }
2692
2693 /*
2694  * Balance filters.  Return 1 if chunk should be filtered out
2695  * (should not be balanced).
2696  */
2697 static int chunk_profiles_filter(u64 chunk_type,
2698                                  struct btrfs_balance_args *bargs)
2699 {
2700         chunk_type = chunk_to_extended(chunk_type) &
2701                                 BTRFS_EXTENDED_PROFILE_MASK;
2702
2703         if (bargs->profiles & chunk_type)
2704                 return 0;
2705
2706         return 1;
2707 }
2708
2709 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2710                               struct btrfs_balance_args *bargs)
2711 {
2712         struct btrfs_block_group_cache *cache;
2713         u64 chunk_used, user_thresh;
2714         int ret = 1;
2715
2716         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2717         chunk_used = btrfs_block_group_used(&cache->item);
2718
2719         if (bargs->usage == 0)
2720                 user_thresh = 1;
2721         else if (bargs->usage > 100)
2722                 user_thresh = cache->key.offset;
2723         else
2724                 user_thresh = div_factor_fine(cache->key.offset,
2725                                               bargs->usage);
2726
2727         if (chunk_used < user_thresh)
2728                 ret = 0;
2729
2730         btrfs_put_block_group(cache);
2731         return ret;
2732 }
2733
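/*
 * Example of the threshold math above: with bargs->usage == 90 and a
 * 1GiB chunk, div_factor_fine() gives 1GiB * 90 / 100, about 921MiB, so
 * the chunk is balanced (the filter returns 0) only while less than
 * that amount is used.  usage == 0 degenerates to "only completely
 * empty chunks" and usage > 100 to "anything not completely full".
 */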
2734 static int chunk_devid_filter(struct extent_buffer *leaf,
2735                               struct btrfs_chunk *chunk,
2736                               struct btrfs_balance_args *bargs)
2737 {
2738         struct btrfs_stripe *stripe;
2739         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2740         int i;
2741
2742         for (i = 0; i < num_stripes; i++) {
2743                 stripe = btrfs_stripe_nr(chunk, i);
2744                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2745                         return 0;
2746         }
2747
2748         return 1;
2749 }
2750
2751 /* [pstart, pend) */
2752 static int chunk_drange_filter(struct extent_buffer *leaf,
2753                                struct btrfs_chunk *chunk,
2754                                u64 chunk_offset,
2755                                struct btrfs_balance_args *bargs)
2756 {
2757         struct btrfs_stripe *stripe;
2758         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2759         u64 stripe_offset;
2760         u64 stripe_length;
2761         int factor;
2762         int i;
2763
2764         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2765                 return 0;
2766
2767         if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2768              BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2769                 factor = num_stripes / 2;
2770         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2771                 factor = num_stripes - 1;
2772         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2773                 factor = num_stripes - 2;
2774         } else {
2775                 factor = num_stripes;
2776         }
2777
2778         for (i = 0; i < num_stripes; i++) {
2779                 stripe = btrfs_stripe_nr(chunk, i);
2780                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2781                         continue;
2782
2783                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2784                 stripe_length = btrfs_chunk_length(leaf, chunk);
2785                 do_div(stripe_length, factor);
2786
2787                 if (stripe_offset < bargs->pend &&
2788                     stripe_offset + stripe_length > bargs->pstart)
2789                         return 0;
2790         }
2791
2792         return 1;
2793 }
2794
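/*
 * The factor above converts a chunk's logical length into the number of
 * bytes each stripe occupies on disk: mirrored profiles (DUP, RAID1,
 * RAID10) store each byte twice across num_stripes, while RAID5/6 lose
 * one or two parity stripes.  E.g. a 2GiB RAID10 chunk over 4 stripes
 * has factor == 2, so each stripe covers 1GiB on its device.
 */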
2795 /* [vstart, vend) */
2796 static int chunk_vrange_filter(struct extent_buffer *leaf,
2797                                struct btrfs_chunk *chunk,
2798                                u64 chunk_offset,
2799                                struct btrfs_balance_args *bargs)
2800 {
2801         if (chunk_offset < bargs->vend &&
2802             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2803                 /* at least part of the chunk is inside this vrange */
2804                 return 0;
2805
2806         return 1;
2807 }
2808
2809 static int chunk_soft_convert_filter(u64 chunk_type,
2810                                      struct btrfs_balance_args *bargs)
2811 {
2812         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2813                 return 0;
2814
2815         chunk_type = chunk_to_extended(chunk_type) &
2816                                 BTRFS_EXTENDED_PROFILE_MASK;
2817
2818         if (bargs->target == chunk_type)
2819                 return 1;
2820
2821         return 0;
2822 }
2823
2824 static int should_balance_chunk(struct btrfs_root *root,
2825                                 struct extent_buffer *leaf,
2826                                 struct btrfs_chunk *chunk, u64 chunk_offset)
2827 {
2828         struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2829         struct btrfs_balance_args *bargs = NULL;
2830         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2831
2832         /* type filter */
2833         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2834               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2835                 return 0;
2836         }
2837
2838         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2839                 bargs = &bctl->data;
2840         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2841                 bargs = &bctl->sys;
2842         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2843                 bargs = &bctl->meta;
2844
2845         /* profiles filter */
2846         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2847             chunk_profiles_filter(chunk_type, bargs)) {
2848                 return 0;
2849         }
2850
2851         /* usage filter */
2852         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2853             chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2854                 return 0;
2855         }
2856
2857         /* devid filter */
2858         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2859             chunk_devid_filter(leaf, chunk, bargs)) {
2860                 return 0;
2861         }
2862
2863         /* drange filter, makes sense only with devid filter */
2864         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2865             chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2866                 return 0;
2867         }
2868
2869         /* vrange filter */
2870         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2871             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2872                 return 0;
2873         }
2874
2875         /* soft profile changing mode */
2876         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2877             chunk_soft_convert_filter(chunk_type, bargs)) {
2878                 return 0;
2879         }
2880
2881         return 1;
2882 }
2883
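/*
 * The main balance loop.  Step one makes room on devices that are
 * almost full by shrinking them by up to 1MB and growing them back,
 * which relocates whatever sat at the end of the device.  Step two then
 * walks the chunk tree backwards in two passes: a counting pass that
 * fills bctl->stat, and the real pass that relocates every chunk
 * accepted by should_balance_chunk().
 */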
2884 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2885 {
2886         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2887         struct btrfs_root *chunk_root = fs_info->chunk_root;
2888         struct btrfs_root *dev_root = fs_info->dev_root;
2889         struct list_head *devices;
2890         struct btrfs_device *device;
2891         u64 old_size;
2892         u64 size_to_free;
2893         struct btrfs_chunk *chunk;
2894         struct btrfs_path *path;
2895         struct btrfs_key key;
2896         struct btrfs_key found_key;
2897         struct btrfs_trans_handle *trans;
2898         struct extent_buffer *leaf;
2899         int slot;
2900         int ret;
2901         int enospc_errors = 0;
2902         bool counting = true;
2903
2904         /* step one, make some room on all the devices */
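             /*
              * Shrink every writeable device by min(10% of its size,
              * 1MiB) (div_factor(x, 1) is x / 10) and immediately grow
              * it back to its old size.  The shrink relocates any chunk
              * that lives in that tail region, which leaves a little
              * free space at the end of each device for the relocation
              * work below.  Devices that already have more unallocated
              * space than that are skipped.
              */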
2905         devices = &fs_info->fs_devices->devices;
2906         list_for_each_entry(device, devices, dev_list) {
2907                 old_size = device->total_bytes;
2908                 size_to_free = div_factor(old_size, 1);
2909                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2910                 if (!device->writeable ||
2911                     device->total_bytes - device->bytes_used > size_to_free ||
2912                     device->is_tgtdev_for_dev_replace)
2913                         continue;
2914
2915                 ret = btrfs_shrink_device(device, old_size - size_to_free);
2916                 if (ret == -ENOSPC)
2917                         break;
2918                 BUG_ON(ret);
2919
2920                 trans = btrfs_start_transaction(dev_root, 0);
2921                 BUG_ON(IS_ERR(trans));
2922
2923                 ret = btrfs_grow_device(trans, device, old_size);
2924                 BUG_ON(ret);
2925
2926                 btrfs_end_transaction(trans, dev_root);
2927         }
2928
2929         /* step two, relocate all the chunks */
2930         path = btrfs_alloc_path();
2931         if (!path) {
2932                 ret = -ENOMEM;
2933                 goto error;
2934         }
2935
2936         /* zero out stat counters */
2937         spin_lock(&fs_info->balance_lock);
2938         memset(&bctl->stat, 0, sizeof(bctl->stat));
2939         spin_unlock(&fs_info->balance_lock);
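             /*
              * Everything from the again: label onwards runs twice: a
              * counting-only pass (counting == true) that just fills
              * bctl->stat.expected for progress reporting, and a second
              * pass that actually relocates the chunks.  Both passes
              * walk the chunk tree backwards, from offset (u64)-1 down
              * towards chunk zero.
              */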
2940 again:
2941         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2942         key.offset = (u64)-1;
2943         key.type = BTRFS_CHUNK_ITEM_KEY;
2944
2945         while (1) {
2946                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2947                     atomic_read(&fs_info->balance_cancel_req)) {
2948                         ret = -ECANCELED;
2949                         goto error;
2950                 }
2951
2952                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2953                 if (ret < 0)
2954                         goto error;
2955
2956                 /*
2957                  * this shouldn't happen; it means the last
2958                  * relocate failed
2959                  */
2960                 if (ret == 0)
2961                         BUG(); /* FIXME break ? */
2962
2963                 ret = btrfs_previous_item(chunk_root, path, 0,
2964                                           BTRFS_CHUNK_ITEM_KEY);
2965                 if (ret) {
2966                         ret = 0;
2967                         break;
2968                 }
2969
2970                 leaf = path->nodes[0];
2971                 slot = path->slots[0];
2972                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2973
2974                 if (found_key.objectid != key.objectid)
2975                         break;
2976
2977                 /* chunk zero is special */
2978                 if (found_key.offset == 0)
2979                         break;
2980
2981                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2982
2983                 if (!counting) {
2984                         spin_lock(&fs_info->balance_lock);
2985                         bctl->stat.considered++;
2986                         spin_unlock(&fs_info->balance_lock);
2987                 }
2988
2989                 ret = should_balance_chunk(chunk_root, leaf, chunk,
2990                                            found_key.offset);
2991                 btrfs_release_path(path);
2992                 if (!ret)
2993                         goto loop;
2994
2995                 if (counting) {
2996                         spin_lock(&fs_info->balance_lock);
2997                         bctl->stat.expected++;
2998                         spin_unlock(&fs_info->balance_lock);
2999                         goto loop;
3000                 }
3001
3002                 ret = btrfs_relocate_chunk(chunk_root,
3003                                            chunk_root->root_key.objectid,
3004                                            found_key.objectid,
3005                                            found_key.offset);
3006                 if (ret && ret != -ENOSPC)
3007                         goto error;
3008                 if (ret == -ENOSPC) {
3009                         enospc_errors++;
3010                 } else {
3011                         spin_lock(&fs_info->balance_lock);
3012                         bctl->stat.completed++;
3013                         spin_unlock(&fs_info->balance_lock);
3014                 }
3015 loop:
3016                 key.offset = found_key.offset - 1;
3017         }
3018
3019         if (counting) {
3020                 btrfs_release_path(path);
3021                 counting = false;
3022                 goto again;
3023         }
3024 error:
3025         btrfs_free_path(path);
3026         if (enospc_errors) {
3027                 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3028                        enospc_errors);
3029                 if (!ret)
3030                         ret = -ENOSPC;
3031         }
3032
3033         return ret;
3034 }
3035
3036 /**
3037  * alloc_profile_is_valid - see if a given profile is valid and reduced
3038  * @flags: profile to validate
3039  * @extended: if true @flags is treated as an extended profile
3040  */
3041 static int alloc_profile_is_valid(u64 flags, int extended)
3042 {
3043         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3044                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3045
3046         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3047
3048         /* 1) check that all other bits are zeroed */
3049         if (flags & ~mask)
3050                 return 0;
3051
3052         /* 2) see if profile is reduced */
3053         if (flags == 0)
3054                 return !extended; /* "0" is valid for usual profiles */
3055
3056         /* true if exactly one bit set */
3057         return (flags & (flags - 1)) == 0;
3058 }
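
     /*
      * Example of the reduced-profile check above: with the type bits
      * masked out, flags == BTRFS_BLOCK_GROUP_RAID1 has exactly one bit
      * set, so (flags & (flags - 1)) == 0 and the profile is valid;
      * flags == (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0) has
      * two bits set, (flags & (flags - 1)) != 0, and is rejected as not
      * reduced.
      */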
3059
3060 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3061 {
3062         /* cancel requested || normal exit path */
3063         return atomic_read(&fs_info->balance_cancel_req) ||
3064                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3065                  atomic_read(&fs_info->balance_cancel_req) == 0);
3066 }
3067
3068 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3069 {
3070         int ret;
3071
3072         unset_balance_control(fs_info);
3073         ret = del_balance_item(fs_info->tree_root);
3074         if (ret)
3075                 btrfs_std_error(fs_info, ret);
3076
3077         atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3078 }
3079
3080 /*
3081  * Should be called with both balance and volume mutexes held
3082  */
3083 int btrfs_balance(struct btrfs_balance_control *bctl,
3084                   struct btrfs_ioctl_balance_args *bargs)
3085 {
3086         struct btrfs_fs_info *fs_info = bctl->fs_info;
3087         u64 allowed;
3088         int mixed = 0;
3089         int ret;
3090         u64 num_devices;
3091         unsigned seq;
3092
3093         if (btrfs_fs_closing(fs_info) ||
3094             atomic_read(&fs_info->balance_pause_req) ||
3095             atomic_read(&fs_info->balance_cancel_req)) {
3096                 ret = -EINVAL;
3097                 goto out;
3098         }
3099
3100         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3101         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3102                 mixed = 1;
3103
3104         /*
3105          * In case of mixed groups, both data and metadata must be
3106          * balanced, and identical options must be given for both of them.
3107          */
3108         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3109         if (mixed && (bctl->flags & allowed)) {
3110                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3111                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3112                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3113                         printk(KERN_ERR "btrfs: with mixed groups data and "
3114                                "metadata balance options must be the same\n");
3115                         ret = -EINVAL;
3116                         goto out;
3117                 }
3118         }
3119
3120         num_devices = fs_info->fs_devices->num_devices;
3121         btrfs_dev_replace_lock(&fs_info->dev_replace);
3122         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3123                 BUG_ON(num_devices < 1);
3124                 num_devices--;
3125         }
3126         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3127         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3128         if (num_devices == 1)
3129                 allowed |= BTRFS_BLOCK_GROUP_DUP;
3130         else if (num_devices > 1)
3131                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3132         if (num_devices > 2)
3133                 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3134         if (num_devices > 3)
3135                 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3136                             BTRFS_BLOCK_GROUP_RAID6);
3137         if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3138             (!alloc_profile_is_valid(bctl->data.target, 1) ||
3139              (bctl->data.target & ~allowed))) {
3140                 printk(KERN_ERR "btrfs: unable to start balance with target "
3141                        "data profile %llu\n",
3142                        (unsigned long long)bctl->data.target);
3143                 ret = -EINVAL;
3144                 goto out;
3145         }
3146         if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3147             (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3148              (bctl->meta.target & ~allowed))) {
3149                 printk(KERN_ERR "btrfs: unable to start balance with target "
3150                        "metadata profile %llu\n",
3151                        (unsigned long long)bctl->meta.target);
3152                 ret = -EINVAL;
3153                 goto out;
3154         }
3155         if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3156             (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3157              (bctl->sys.target & ~allowed))) {
3158                 printk(KERN_ERR "btrfs: unable to start balance with target "
3159                        "system profile %llu\n",
3160                        (unsigned long long)bctl->sys.target);
3161                 ret = -EINVAL;
3162                 goto out;
3163         }
3164
3165         /* allow dup'ed data chunks only in mixed mode */
3166         if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3167             (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3168                 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3169                 ret = -EINVAL;
3170                 goto out;
3171         }
3172
3173         /* allow reducing meta or sys integrity only if force is set */
3174         allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3175                         BTRFS_BLOCK_GROUP_RAID10 |
3176                         BTRFS_BLOCK_GROUP_RAID5 |
3177                         BTRFS_BLOCK_GROUP_RAID6;
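             /*
              * The avail_{data,metadata,system}_alloc_bits fields can
              * change under us as chunks are allocated, so sample them
              * under the profiles_lock seqlock: if read_seqretry()
              * reports that a writer raced with us, the whole check is
              * simply redone.
              */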
3178         do {
3179                 seq = read_seqbegin(&fs_info->profiles_lock);
3180
3181                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3182                      (fs_info->avail_system_alloc_bits & allowed) &&
3183                      !(bctl->sys.target & allowed)) ||
3184                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3185                      (fs_info->avail_metadata_alloc_bits & allowed) &&
3186                      !(bctl->meta.target & allowed))) {
3187                         if (bctl->flags & BTRFS_BALANCE_FORCE) {
3188                                 printk(KERN_INFO "btrfs: force reducing metadata "
3189                                        "integrity\n");
3190                         } else {
3191                                 printk(KERN_ERR "btrfs: balance will reduce metadata "
3192                                        "integrity, use force if you want this\n");
3193                                 ret = -EINVAL;
3194                                 goto out;
3195                         }
3196                 }
3197         } while (read_seqretry(&fs_info->profiles_lock, seq));
3198
3199         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3200                 int num_tolerated_disk_barrier_failures;
3201                 u64 target = bctl->sys.target;
3202
3203                 num_tolerated_disk_barrier_failures =
3204                         btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3205                 if (num_tolerated_disk_barrier_failures > 0 &&
3206                     (target &
3207                      (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3208                       BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3209                         num_tolerated_disk_barrier_failures = 0;
3210                 else if (num_tolerated_disk_barrier_failures > 1 &&
3211                          (target &
3212                           (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3213                         num_tolerated_disk_barrier_failures = 1;
3214
3215                 fs_info->num_tolerated_disk_barrier_failures =
3216                         num_tolerated_disk_barrier_failures;
3217         }
3218
3219         ret = insert_balance_item(fs_info->tree_root, bctl);
3220         if (ret && ret != -EEXIST)
3221                 goto out;
3222
3223         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3224                 BUG_ON(ret == -EEXIST);
3225                 set_balance_control(bctl);
3226         } else {
3227                 BUG_ON(ret != -EEXIST);
3228                 spin_lock(&fs_info->balance_lock);
3229                 update_balance_args(bctl);
3230                 spin_unlock(&fs_info->balance_lock);
3231         }
3232
3233         atomic_inc(&fs_info->balance_running);
3234         mutex_unlock(&fs_info->balance_mutex);
3235
3236         ret = __btrfs_balance(fs_info);
3237
3238         mutex_lock(&fs_info->balance_mutex);
3239         atomic_dec(&fs_info->balance_running);
3240
3241         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3242                 fs_info->num_tolerated_disk_barrier_failures =
3243                         btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3244         }
3245
3246         if (bargs) {
3247                 memset(bargs, 0, sizeof(*bargs));
3248                 update_ioctl_balance_args(fs_info, 0, bargs);
3249         }
3250
3251         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3252             balance_need_close(fs_info)) {
3253                 __cancel_balance(fs_info);
3254         }
3255
3256         wake_up(&fs_info->balance_wait_q);
3257
3258         return ret;
3259 out:
3260         if (bctl->flags & BTRFS_BALANCE_RESUME)
3261                 __cancel_balance(fs_info);
3262         else {
3263                 kfree(bctl);
3264                 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3265         }
3266         return ret;
3267 }
3268
3269 static int balance_kthread(void *data)
3270 {
3271         struct btrfs_fs_info *fs_info = data;
3272         int ret = 0;
3273
3274         mutex_lock(&fs_info->volume_mutex);
3275         mutex_lock(&fs_info->balance_mutex);
3276
3277         if (fs_info->balance_ctl) {
3278                 printk(KERN_INFO "btrfs: continuing balance\n");
3279                 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3280         }
3281
3282         mutex_unlock(&fs_info->balance_mutex);
3283         mutex_unlock(&fs_info->volume_mutex);
3284
3285         return ret;
3286 }
3287
3288 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3289 {
3290         struct task_struct *tsk;
3291
3292         spin_lock(&fs_info->balance_lock);
3293         if (!fs_info->balance_ctl) {
3294                 spin_unlock(&fs_info->balance_lock);
3295                 return 0;
3296         }
3297         spin_unlock(&fs_info->balance_lock);
3298
3299         if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3300                 printk(KERN_INFO "btrfs: force skipping balance\n");
3301                 return 0;
3302         }
3303
3304         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3305         return PTR_RET(tsk);
3306 }
3307
3308 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3309 {
3310         struct btrfs_balance_control *bctl;
3311         struct btrfs_balance_item *item;
3312         struct btrfs_disk_balance_args disk_bargs;
3313         struct btrfs_path *path;
3314         struct extent_buffer *leaf;
3315         struct btrfs_key key;
3316         int ret;
3317
3318         path = btrfs_alloc_path();
3319         if (!path)
3320                 return -ENOMEM;
3321
3322         key.objectid = BTRFS_BALANCE_OBJECTID;
3323         key.type = BTRFS_BALANCE_ITEM_KEY;
3324         key.offset = 0;
3325
3326         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3327         if (ret < 0)
3328                 goto out;
3329         if (ret > 0) { /* ret = -ENOENT; */
3330                 ret = 0;
3331                 goto out;
3332         }
3333
3334         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3335         if (!bctl) {
3336                 ret = -ENOMEM;
3337                 goto out;
3338         }
3339
3340         leaf = path->nodes[0];
3341         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3342
3343         bctl->fs_info = fs_info;
3344         bctl->flags = btrfs_balance_flags(leaf, item);
3345         bctl->flags |= BTRFS_BALANCE_RESUME;
3346
3347         btrfs_balance_data(leaf, item, &disk_bargs);
3348         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3349         btrfs_balance_meta(leaf, item, &disk_bargs);
3350         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3351         btrfs_balance_sys(leaf, item, &disk_bargs);
3352         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3353
3354         WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3355
3356         mutex_lock(&fs_info->volume_mutex);
3357         mutex_lock(&fs_info->balance_mutex);
3358
3359         set_balance_control(bctl);
3360
3361         mutex_unlock(&fs_info->balance_mutex);
3362         mutex_unlock(&fs_info->volume_mutex);
3363 out:
3364         btrfs_free_path(path);
3365         return ret;
3366 }
3367
3368 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3369 {
3370         int ret = 0;
3371
3372         mutex_lock(&fs_info->balance_mutex);
3373         if (!fs_info->balance_ctl) {
3374                 mutex_unlock(&fs_info->balance_mutex);
3375                 return -ENOTCONN;
3376         }
3377
3378         if (atomic_read(&fs_info->balance_running)) {
3379                 atomic_inc(&fs_info->balance_pause_req);
3380                 mutex_unlock(&fs_info->balance_mutex);
3381
3382                 wait_event(fs_info->balance_wait_q,
3383                            atomic_read(&fs_info->balance_running) == 0);
3384
3385                 mutex_lock(&fs_info->balance_mutex);
3386                 /* we are good with balance_ctl ripped off from under us */
3387                 BUG_ON(atomic_read(&fs_info->balance_running));
3388                 atomic_dec(&fs_info->balance_pause_req);
3389         } else {
3390                 ret = -ENOTCONN;
3391         }
3392
3393         mutex_unlock(&fs_info->balance_mutex);
3394         return ret;
3395 }
3396
3397 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3398 {
3399         mutex_lock(&fs_info->balance_mutex);
3400         if (!fs_info->balance_ctl) {
3401                 mutex_unlock(&fs_info->balance_mutex);
3402                 return -ENOTCONN;
3403         }
3404
3405         atomic_inc(&fs_info->balance_cancel_req);
3406         /*
3407          * if we are running, just wait and return; the balance item is
3408          * deleted in btrfs_balance() in this case
3409          */
3410         if (atomic_read(&fs_info->balance_running)) {
3411                 mutex_unlock(&fs_info->balance_mutex);
3412                 wait_event(fs_info->balance_wait_q,
3413                            atomic_read(&fs_info->balance_running) == 0);
3414                 mutex_lock(&fs_info->balance_mutex);
3415         } else {
3416                 /* __cancel_balance needs volume_mutex */
3417                 mutex_unlock(&fs_info->balance_mutex);
3418                 mutex_lock(&fs_info->volume_mutex);
3419                 mutex_lock(&fs_info->balance_mutex);
3420
3421                 if (fs_info->balance_ctl)
3422                         __cancel_balance(fs_info);
3423
3424                 mutex_unlock(&fs_info->volume_mutex);
3425         }
3426
3427         BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3428         atomic_dec(&fs_info->balance_cancel_req);
3429         mutex_unlock(&fs_info->balance_mutex);
3430         return 0;
3431 }
3432
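     /*
      * Walk every ROOT_ITEM in the tree of tree roots and (re)create the
      * matching entries in the UUID tree: a BTRFS_UUID_KEY_SUBVOL item
      * for each subvolume uuid and a BTRFS_UUID_KEY_RECEIVED_SUBVOL item
      * for each received_uuid, both mapping the uuid back to the
      * subvolume objectid.  Runs as the "btrfs-uuid" kthread started
      * from btrfs_create_uuid_tree() below.
      */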
3433 static int btrfs_uuid_scan_kthread(void *data)
3434 {
3435         struct btrfs_fs_info *fs_info = data;
3436         struct btrfs_root *root = fs_info->tree_root;
3437         struct btrfs_key key;
3438         struct btrfs_key max_key;
3439         struct btrfs_path *path = NULL;
3440         int ret = 0;
3441         struct extent_buffer *eb;
3442         int slot;
3443         struct btrfs_root_item root_item;
3444         u32 item_size;
3445         struct btrfs_trans_handle *trans;
3446
3447         path = btrfs_alloc_path();
3448         if (!path) {
3449                 ret = -ENOMEM;
3450                 goto out;
3451         }
3452
3453         key.objectid = 0;
3454         key.type = BTRFS_ROOT_ITEM_KEY;
3455         key.offset = 0;
3456
3457         max_key.objectid = (u64)-1;
3458         max_key.type = BTRFS_ROOT_ITEM_KEY;
3459         max_key.offset = (u64)-1;
3460
3461         path->keep_locks = 1;
3462
3463         while (1) {
3464                 ret = btrfs_search_forward(root, &key, &max_key, path, 0);
3465                 if (ret) {
3466                         if (ret > 0)
3467                                 ret = 0;
3468                         break;
3469                 }
3470
3471                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
3472                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3473                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3474                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
3475                         goto skip;
3476
3477                 eb = path->nodes[0];
3478                 slot = path->slots[0];
3479                 item_size = btrfs_item_size_nr(eb, slot);
3480                 if (item_size < sizeof(root_item))
3481                         goto skip;
3482
3483                 trans = NULL;
3484                 read_extent_buffer(eb, &root_item,
3485                                    btrfs_item_ptr_offset(eb, slot),
3486                                    (int)sizeof(root_item));
3487                 if (btrfs_root_refs(&root_item) == 0)
3488                         goto skip;
3489                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
3490                         /*
3491                          * 1 - subvol uuid item
3492                          * 1 - received_subvol uuid item
3493                          */
3494                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3495                         if (IS_ERR(trans)) {
3496                                 ret = PTR_ERR(trans);
3497                                 break;
3498                         }
3499                         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3500                                                   root_item.uuid,
3501                                                   BTRFS_UUID_KEY_SUBVOL,
3502                                                   key.objectid);
3503                         if (ret < 0) {
3504                                 pr_warn("btrfs: uuid_tree_add failed %d\n",
3505                                         ret);
3506                                 btrfs_end_transaction(trans,
3507                                                       fs_info->uuid_root);
3508                                 break;
3509                         }
3510                 }
3511
3512                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3513                         if (!trans) {
3514                                 /* 1 - received_subvol uuid item */
3515                                 trans = btrfs_start_transaction(
3516                                                 fs_info->uuid_root, 1);
3517                                 if (IS_ERR(trans)) {
3518                                         ret = PTR_ERR(trans);
3519                                         break;
3520                                 }
3521                         }
3522                         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3523                                                   root_item.received_uuid,
3524                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3525                                                   key.objectid);
3526                         if (ret < 0) {
3527                                 pr_warn("btrfs: uuid_tree_add failed %d\n",
3528                                         ret);
3529                                 btrfs_end_transaction(trans,
3530                                                       fs_info->uuid_root);
3531                                 break;
3532                         }
3533                 }
3534
3535                 if (trans) {
3536                         ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3537                         if (ret)
3538                                 break;
3539                 }
3540
3541 skip:
3542                 btrfs_release_path(path);
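                     /*
                      * Advance to the smallest possible key strictly
                      * greater than the one just processed: bump
                      * key.offset first, wrap to the next type when
                      * offset overflows, then to the next objectid, and
                      * stop once objectid itself can't grow any more.
                      */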
3543                 if (key.offset < (u64)-1) {
3544                         key.offset++;
3545                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3546                         key.offset = 0;
3547                         key.type = BTRFS_ROOT_ITEM_KEY;
3548                 } else if (key.objectid < (u64)-1) {
3549                         key.offset = 0;
3550                         key.type = BTRFS_ROOT_ITEM_KEY;
3551                         key.objectid++;
3552                 } else {
3553                         break;
3554                 }
3555                 cond_resched();
3556         }
3557
3558 out:
3559         btrfs_free_path(path);
3560         if (ret)
3561                 pr_warn("btrfs: btrfs_uuid_scan_kthread failed %d\n", ret);
3562         up(&fs_info->uuid_tree_rescan_sem);
3563         return 0;
3564 }
3565
3566 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3567 {
3568         struct btrfs_trans_handle *trans;
3569         struct btrfs_root *tree_root = fs_info->tree_root;
3570         struct btrfs_root *uuid_root;
3571         struct task_struct *task;
3572         int ret;
3573
3574         /*
3575          * 1 - root node
3576          * 1 - root item
3577          */
3578         trans = btrfs_start_transaction(tree_root, 2);
3579         if (IS_ERR(trans))
3580                 return PTR_ERR(trans);
3581
3582         uuid_root = btrfs_create_tree(trans, fs_info,
3583                                       BTRFS_UUID_TREE_OBJECTID);
3584         if (IS_ERR(uuid_root)) {
3585                 btrfs_abort_transaction(trans, tree_root,
3586                                         PTR_ERR(uuid_root));
                     /* the aborted handle still has to be ended, or it leaks */
                     btrfs_end_transaction(trans, tree_root);
3587                 return PTR_ERR(uuid_root);
3588         }
3589
3590         fs_info->uuid_root = uuid_root;
3591
3592         ret = btrfs_commit_transaction(trans, tree_root);
3593         if (ret)
3594                 return ret;
3595
3596         down(&fs_info->uuid_tree_rescan_sem);
3597         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3598         if (IS_ERR(task)) {
3599                 pr_warn("btrfs: failed to start uuid_scan task\n");
3600                 up(&fs_info->uuid_tree_rescan_sem);
3601                 return PTR_ERR(task);
3602         }
3603
3604         return 0;
3605 }
3606
3607 /*
3608  * shrinking a device means finding all of the device extents past
3609  * the new size, and then following the back refs to the chunks.
3610  * The chunk relocation code actually frees the device extent.
3611  */
3612 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3613 {
3614         struct btrfs_trans_handle *trans;
3615         struct btrfs_root *root = device->dev_root;
3616         struct btrfs_dev_extent *dev_extent = NULL;
3617         struct btrfs_path *path;
3618         u64 length;
3619         u64 chunk_tree;
3620         u64 chunk_objectid;
3621         u64 chunk_offset;
3622         int ret;
3623         int slot;
3624         int failed = 0;
3625         bool retried = false;
3626         struct extent_buffer *l;
3627         struct btrfs_key key;
3628         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3629         u64 old_total = btrfs_super_total_bytes(super_copy);
3630         u64 old_size = device->total_bytes;
3631         u64 diff = device->total_bytes - new_size;
3632
3633         if (device->is_tgtdev_for_dev_replace)
3634                 return -EINVAL;
3635
3636         path = btrfs_alloc_path();
3637         if (!path)
3638                 return -ENOMEM;
3639
3640         path->reada = 2;
3641
3642         lock_chunks(root);
3643
3644         device->total_bytes = new_size;
3645         if (device->writeable) {
3646                 device->fs_devices->total_rw_bytes -= diff;
3647                 spin_lock(&root->fs_info->free_chunk_lock);
3648                 root->fs_info->free_chunk_space -= diff;
3649                 spin_unlock(&root->fs_info->free_chunk_lock);
3650         }
3651         unlock_chunks(root);
3652
3653 again:
3654         key.objectid = device->devid;
3655         key.offset = (u64)-1;
3656         key.type = BTRFS_DEV_EXTENT_KEY;
3657
3658         do {
3659                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3660                 if (ret < 0)
3661                         goto done;
3662
3663                 ret = btrfs_previous_item(root, path, 0, key.type);
3664                 if (ret < 0)
3665                         goto done;
3666                 if (ret) {
3667                         ret = 0;
3668                         btrfs_release_path(path);
3669                         break;
3670                 }
3671
3672                 l = path->nodes[0];
3673                 slot = path->slots[0];
3674                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3675
3676                 if (key.objectid != device->devid) {
3677                         btrfs_release_path(path);
3678                         break;
3679                 }
3680
3681                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3682                 length = btrfs_dev_extent_length(l, dev_extent);
3683
3684                 if (key.offset + length <= new_size) {
3685                         btrfs_release_path(path);
3686                         break;
3687                 }
3688
3689                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3690                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3691                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3692                 btrfs_release_path(path);
3693
3694                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3695                                            chunk_offset);
3696                 if (ret && ret != -ENOSPC)
3697                         goto done;
3698                 if (ret == -ENOSPC)
3699                         failed++;
3700         } while (key.offset-- > 0);
3701
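             /*
              * A first pass can fail some relocations with -ENOSPC,
              * since moving one chunk may free the space another chunk
              * needs; retry the whole scan once before giving up and
              * rolling the device size back.
              */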
3702         if (failed && !retried) {
3703                 failed = 0;
3704                 retried = true;
3705                 goto again;
3706         } else if (failed && retried) {
3707                 ret = -ENOSPC;
3708                 lock_chunks(root);
3709
3710                 device->total_bytes = old_size;
3711                 if (device->writeable)
3712                         device->fs_devices->total_rw_bytes += diff;
3713                 spin_lock(&root->fs_info->free_chunk_lock);
3714                 root->fs_info->free_chunk_space += diff;
3715                 spin_unlock(&root->fs_info->free_chunk_lock);
3716                 unlock_chunks(root);
3717                 goto done;
3718         }
3719
3720         /* Shrinking succeeded, else we would be at "done". */
3721         trans = btrfs_start_transaction(root, 0);
3722         if (IS_ERR(trans)) {
3723                 ret = PTR_ERR(trans);
3724                 goto done;
3725         }
3726
3727         lock_chunks(root);
3728
3729         device->disk_total_bytes = new_size;
3730         /* Now btrfs_update_device() will change the on-disk size. */
3731         ret = btrfs_update_device(trans, device);
3732         if (ret) {
3733                 unlock_chunks(root);
3734                 btrfs_end_transaction(trans, root);
3735                 goto done;
3736         }
3737         WARN_ON(diff > old_total);
3738         btrfs_set_super_total_bytes(super_copy, old_total - diff);
3739         unlock_chunks(root);
3740         btrfs_end_transaction(trans, root);
3741 done:
3742         btrfs_free_path(path);
3743         return ret;
3744 }
3745
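     /*
      * System chunks are mirrored into the superblock's sys_chunk_array
      * as (struct btrfs_disk_key, struct btrfs_chunk) pairs, so the
      * logical->physical mapping needed to read the chunk tree itself
      * is available at mount time.  The array is a fixed
      * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes, hence the -EFBIG below.
      */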
3746 static int btrfs_add_system_chunk(struct btrfs_root *root,
3747                            struct btrfs_key *key,
3748                            struct btrfs_chunk *chunk, int item_size)
3749 {
3750         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3751         struct btrfs_disk_key disk_key;
3752         u32 array_size;
3753         u8 *ptr;
3754
3755         array_size = btrfs_super_sys_array_size(super_copy);
3756         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3757                 return -EFBIG;
3758
3759         ptr = super_copy->sys_chunk_array + array_size;
3760         btrfs_cpu_key_to_disk(&disk_key, key);
3761         memcpy(ptr, &disk_key, sizeof(disk_key));
3762         ptr += sizeof(disk_key);
3763         memcpy(ptr, chunk, item_size);
3764         item_size += sizeof(disk_key);
3765         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3766         return 0;
3767 }
3768
3769 /*
3770  * sort the devices in descending order by max_avail, total_avail
3771  */
3772 static int btrfs_cmp_device_info(const void *a, const void *b)
3773 {
3774         const struct btrfs_device_info *di_a = a;
3775         const struct btrfs_device_info *di_b = b;
3776
3777         if (di_a->max_avail > di_b->max_avail)
3778                 return -1;
3779         if (di_a->max_avail < di_b->max_avail)
3780                 return 1;
3781         if (di_a->total_avail > di_b->total_avail)
3782                 return -1;
3783         if (di_a->total_avail < di_b->total_avail)
3784                 return 1;
3785         return 0;
3786 }
3787
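     /*
      * Per-profile allocation constraints, indexed by BTRFS_RAID_*.
      * Reading the RAID10 entry as an example: stripes are mirrored in
      * pairs (sub_stripes = 2), at least four devices are required
      * (devs_min = 4), devices can only be added two at a time
      * (devs_increment = 2), and every block exists twice (ncopies = 2).
      */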
3788 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3789         [BTRFS_RAID_RAID10] = {
3790                 .sub_stripes    = 2,
3791                 .dev_stripes    = 1,
3792                 .devs_max       = 0,    /* 0 == as many as possible */
3793                 .devs_min       = 4,
3794                 .devs_increment = 2,
3795                 .ncopies        = 2,
3796         },
3797         [BTRFS_RAID_RAID1] = {
3798                 .sub_stripes    = 1,
3799                 .dev_stripes    = 1,
3800                 .devs_max       = 2,
3801                 .devs_min       = 2,
3802                 .devs_increment = 2,
3803                 .ncopies        = 2,
3804         },
3805         [BTRFS_RAID_DUP] = {
3806                 .sub_stripes    = 1,
3807                 .dev_stripes    = 2,
3808                 .devs_max       = 1,
3809                 .devs_min       = 1,
3810                 .devs_increment = 1,
3811                 .ncopies        = 2,
3812         },
3813         [BTRFS_RAID_RAID0] = {
3814                 .sub_stripes    = 1,
3815                 .dev_stripes    = 1,
3816                 .devs_max       = 0,
3817                 .devs_min       = 2,
3818                 .devs_increment = 1,
3819                 .ncopies        = 1,
3820         },
3821         [BTRFS_RAID_SINGLE] = {
3822                 .sub_stripes    = 1,
3823                 .dev_stripes    = 1,
3824                 .devs_max       = 1,
3825                 .devs_min       = 1,
3826                 .devs_increment = 1,
3827                 .ncopies        = 1,
3828         },
3829         [BTRFS_RAID_RAID5] = {
3830                 .sub_stripes    = 1,
3831                 .dev_stripes    = 1,
3832                 .devs_max       = 0,
3833                 .devs_min       = 2,
3834                 .devs_increment = 1,
3835                 .ncopies        = 2,
3836         },
3837         [BTRFS_RAID_RAID6] = {
3838                 .sub_stripes    = 1,
3839                 .dev_stripes    = 1,
3840                 .devs_max       = 0,
3841                 .devs_min       = 3,
3842                 .devs_increment = 1,
3843                 .ncopies        = 3,
3844         },
3845 };
3846
3847 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3848 {
3849         /* TODO allow them to set a preferred stripe size */
3850         return 64 * 1024;
3851 }
3852
3853 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3854 {
3855         if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3856                 return;
3857
3858         btrfs_set_fs_incompat(info, RAID56);
3859 }
3860
3861 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3862                                struct btrfs_root *extent_root, u64 start,
3863                                u64 type)
3864 {
3865         struct btrfs_fs_info *info = extent_root->fs_info;
3866         struct btrfs_fs_devices *fs_devices = info->fs_devices;
3867         struct list_head *cur;
3868         struct map_lookup *map = NULL;
3869         struct extent_map_tree *em_tree;
3870         struct extent_map *em;
3871         struct btrfs_device_info *devices_info = NULL;
3872         u64 total_avail;
3873         int num_stripes;        /* total number of stripes to allocate */
3874         int data_stripes;       /* number of stripes that count for
3875                                    block group size */
3876         int sub_stripes;        /* sub_stripes info for map */
3877         int dev_stripes;        /* stripes per dev */
3878         int devs_max;           /* max devs to use */
3879         int devs_min;           /* min devs needed */
3880         int devs_increment;     /* ndevs has to be a multiple of this */
3881         int ncopies;            /* how many copies the data has */
3882         int ret;
3883         u64 max_stripe_size;
3884         u64 max_chunk_size;
3885         u64 stripe_size;
3886         u64 num_bytes;
3887         u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3888         int ndevs;
3889         int i;
3890         int j;
3891         int index;
3892
3893         BUG_ON(!alloc_profile_is_valid(type, 0));
3894
3895         if (list_empty(&fs_devices->alloc_list))
3896                 return -ENOSPC;
3897
3898         index = __get_raid_index(type);
3899
3900         sub_stripes = btrfs_raid_array[index].sub_stripes;
3901         dev_stripes = btrfs_raid_array[index].dev_stripes;
3902         devs_max = btrfs_raid_array[index].devs_max;
3903         devs_min = btrfs_raid_array[index].devs_min;
3904         devs_increment = btrfs_raid_array[index].devs_increment;
3905         ncopies = btrfs_raid_array[index].ncopies;
3906
3907         if (type & BTRFS_BLOCK_GROUP_DATA) {
3908                 max_stripe_size = 1024 * 1024 * 1024;
3909                 max_chunk_size = 10 * max_stripe_size;
3910         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3911                 /* for larger filesystems, use larger metadata chunks */
3912                 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3913                         max_stripe_size = 1024 * 1024 * 1024;
3914                 else
3915                         max_stripe_size = 256 * 1024 * 1024;
3916                 max_chunk_size = max_stripe_size;
3917         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3918                 max_stripe_size = 32 * 1024 * 1024;
3919                 max_chunk_size = 2 * max_stripe_size;
3920         } else {
3921                 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3922                        type);
3923                 BUG_ON(1);
3924         }
3925
3926         /* we don't want a chunk larger than 10% of writeable space */
3927         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3928                              max_chunk_size);
3929
3930         devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3931                                GFP_NOFS);
3932         if (!devices_info)
3933                 return -ENOMEM;
3934
3935         cur = fs_devices->alloc_list.next;
3936
3937         /*
3938          * in the first pass through the devices list, we gather information
3939          * about the available holes on each device.
3940          */
3941         ndevs = 0;
3942         while (cur != &fs_devices->alloc_list) {
3943                 struct btrfs_device *device;
3944                 u64 max_avail;
3945                 u64 dev_offset;
3946
3947                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3948
3949                 cur = cur->next;
3950
3951                 if (!device->writeable) {
3952                         WARN(1, KERN_ERR
3953                                "btrfs: read-only device in alloc_list\n");
3954                         continue;
3955                 }
3956
3957                 if (!device->in_fs_metadata ||
3958                     device->is_tgtdev_for_dev_replace)
3959                         continue;
3960
3961                 if (device->total_bytes > device->bytes_used)
3962                         total_avail = device->total_bytes - device->bytes_used;
3963                 else
3964                         total_avail = 0;
3965
3966                 /* If there is no space on this device, skip it. */
3967                 if (total_avail == 0)
3968                         continue;
3969
3970                 ret = find_free_dev_extent(trans, device,
3971                                            max_stripe_size * dev_stripes,
3972                                            &dev_offset, &max_avail);
3973                 if (ret && ret != -ENOSPC)
3974                         goto error;
3975
3976                 if (ret == 0)
3977                         max_avail = max_stripe_size * dev_stripes;
3978
3979                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3980                         continue;
3981
3982                 if (ndevs == fs_devices->rw_devices) {
3983                         WARN(1, "%s: found more than %llu devices\n",
3984                              __func__, fs_devices->rw_devices);
3985                         break;
3986                 }
3987                 devices_info[ndevs].dev_offset = dev_offset;
3988                 devices_info[ndevs].max_avail = max_avail;
3989                 devices_info[ndevs].total_avail = total_avail;
3990                 devices_info[ndevs].dev = device;
3991                 ++ndevs;
3992         }
3993
3994         /*
3995          * now sort the devices by hole size / available space
3996          */
3997         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3998              btrfs_cmp_device_info, NULL);
3999
4000         /* round down to number of usable stripes */
4001         ndevs -= ndevs % devs_increment;
4002
4003         if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4004                 ret = -ENOSPC;
4005                 goto error;
4006         }
4007
4008         if (devs_max && ndevs > devs_max)
4009                 ndevs = devs_max;
4010         /*
4011          * the primary goal is to maximize the number of stripes, so use as many
4012          * devices as possible, even if the stripes are not maximum sized.
4013          */
4014         stripe_size = devices_info[ndevs-1].max_avail;
4015         num_stripes = ndevs * dev_stripes;
4016
4017         /*
4018          * this will have to be fixed for RAID1 and RAID10 over
4019          * more drives
4020          */
4021         data_stripes = num_stripes / ncopies;
4022
4023         if (type & BTRFS_BLOCK_GROUP_RAID5) {
4024                 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4025                                  btrfs_super_stripesize(info->super_copy));
4026                 data_stripes = num_stripes - 1;
4027         }
4028         if (type & BTRFS_BLOCK_GROUP_RAID6) {
4029                 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4030                                  btrfs_super_stripesize(info->super_copy));
4031                 data_stripes = num_stripes - 2;
4032         }
4033
4034         /*
4035          * Use the number of data stripes to figure out how big this chunk
4036          * is really going to be in terms of logical address space,
4037          * and compare that answer with the max chunk size
4038          */
4039         if (stripe_size * data_stripes > max_chunk_size) {
4040                 u64 mask = (1ULL << 24) - 1;
4041                 stripe_size = max_chunk_size;
4042                 do_div(stripe_size, data_stripes);
4043
4044                 /* bump the answer up to a 16MB boundary */
4045                 stripe_size = (stripe_size + mask) & ~mask;
4046
4047                 /* but don't go higher than the limits we found
4048                  * while searching for free extents
4049                  */
4050                 if (stripe_size > devices_info[ndevs-1].max_avail)
4051                         stripe_size = devices_info[ndevs-1].max_avail;
4052         }
4053
4054         do_div(stripe_size, dev_stripes);
4055
4056         /* align to BTRFS_STRIPE_LEN */
4057         do_div(stripe_size, raid_stripe_len);
4058         stripe_size *= raid_stripe_len;
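
             /*
              * Worked example with made-up numbers: a data chunk
              * (max_chunk_size = 10GiB) striped RAID0 across three
              * devices gives num_stripes = data_stripes = 3.  If each
              * device offers a 1GiB hole, stripe_size starts at 1GiB
              * and 3GiB <= 10GiB, so it is kept; had the cap been
              * exceeded, stripe_size would become max_chunk_size / 3
              * rounded up to a 16MiB boundary, then trimmed down to a
              * multiple of raid_stripe_len by the do_div() above.
              */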
4059
4060         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4061         if (!map) {
4062                 ret = -ENOMEM;
4063                 goto error;
4064         }
4065         map->num_stripes = num_stripes;
4066
4067         for (i = 0; i < ndevs; ++i) {
4068                 for (j = 0; j < dev_stripes; ++j) {
4069                         int s = i * dev_stripes + j;
4070                         map->stripes[s].dev = devices_info[i].dev;
4071                         map->stripes[s].physical = devices_info[i].dev_offset +
4072                                                    j * stripe_size;
4073                 }
4074         }
4075         map->sector_size = extent_root->sectorsize;
4076         map->stripe_len = raid_stripe_len;
4077         map->io_align = raid_stripe_len;
4078         map->io_width = raid_stripe_len;
4079         map->type = type;
4080         map->sub_stripes = sub_stripes;
4081
4082         num_bytes = stripe_size * data_stripes;
4083
4084         trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4085
4086         em = alloc_extent_map();
4087         if (!em) {
4088                 ret = -ENOMEM;
4089                 goto error;
4090         }
4091         em->bdev = (struct block_device *)map;
4092         em->start = start;
4093         em->len = num_bytes;
4094         em->block_start = 0;
4095         em->block_len = em->len;
4096         em->orig_block_len = stripe_size;
4097
4098         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4099         write_lock(&em_tree->lock);
4100         ret = add_extent_mapping(em_tree, em, 0);
4101         if (!ret) {
4102                 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4103                 atomic_inc(&em->refs);
4104         }
4105         write_unlock(&em_tree->lock);
4106         if (ret) {
4107                 free_extent_map(em);
4108                 goto error;
4109         }
4110
4111         ret = btrfs_make_block_group(trans, extent_root, 0, type,
4112                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4113                                      start, num_bytes);
4114         if (ret)
4115                 goto error_del_extent;
4116
4117         free_extent_map(em);
4118         check_raid56_incompat_flag(extent_root->fs_info, type);
4119
4120         kfree(devices_info);
4121         return 0;
4122
4123 error_del_extent:
4124         write_lock(&em_tree->lock);
4125         remove_extent_mapping(em_tree, em);
4126         write_unlock(&em_tree->lock);
4127
4128         /* One for our allocation */
4129         free_extent_map(em);
4130         /* One for the tree reference */
4131         free_extent_map(em);
4132 error:
4133         kfree(map);
4134         kfree(devices_info);
4135         return ret;
4136 }
4137
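     /*
      * Second phase of chunk allocation (see the comment above
      * btrfs_alloc_chunk()): look the pending chunk up in the mapping
      * tree, write the per-device extent items, adjust the free space
      * accounting and insert the chunk item itself, mirroring system
      * chunks into the superblock array.
      */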
4138 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4139                                 struct btrfs_root *extent_root,
4140                                 u64 chunk_offset, u64 chunk_size)
4141 {
4142         struct btrfs_key key;
4143         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4144         struct btrfs_device *device;
4145         struct btrfs_chunk *chunk;
4146         struct btrfs_stripe *stripe;
4147         struct extent_map_tree *em_tree;
4148         struct extent_map *em;
4149         struct map_lookup *map;
4150         size_t item_size;
4151         u64 dev_offset;
4152         u64 stripe_size;
4153         int i = 0;
4154         int ret;
4155
4156         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4157         read_lock(&em_tree->lock);
4158         em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4159         read_unlock(&em_tree->lock);
4160
4161         if (!em) {
4162                 btrfs_crit(extent_root->fs_info, "unable to find logical "
4163                            "%Lu len %Lu", chunk_offset, chunk_size);
4164                 return -EINVAL;
4165         }
4166
4167         if (em->start != chunk_offset || em->len != chunk_size) {
4168                 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4169                           " %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
4170                           chunk_size, em->start, em->len);
4171                 free_extent_map(em);
4172                 return -EINVAL;
4173         }
4174
4175         map = (struct map_lookup *)em->bdev;
4176         item_size = btrfs_chunk_item_size(map->num_stripes);
4177         stripe_size = em->orig_block_len;
4178
4179         chunk = kzalloc(item_size, GFP_NOFS);
4180         if (!chunk) {
4181                 ret = -ENOMEM;
4182                 goto out;
4183         }
4184
4185         for (i = 0; i < map->num_stripes; i++) {
4186                 device = map->stripes[i].dev;
4187                 dev_offset = map->stripes[i].physical;
4188
4189                 device->bytes_used += stripe_size;
4190                 ret = btrfs_update_device(trans, device);
4191                 if (ret)
4192                         goto out;
4193                 ret = btrfs_alloc_dev_extent(trans, device,
4194                                              chunk_root->root_key.objectid,
4195                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4196                                              chunk_offset, dev_offset,
4197                                              stripe_size);
4198                 if (ret)
4199                         goto out;
4200         }
4201
4202         spin_lock(&extent_root->fs_info->free_chunk_lock);
4203         extent_root->fs_info->free_chunk_space -= (stripe_size *
4204                                                    map->num_stripes);
4205         spin_unlock(&extent_root->fs_info->free_chunk_lock);
4206
4207         stripe = &chunk->stripe;
4208         for (i = 0; i < map->num_stripes; i++) {
4209                 device = map->stripes[i].dev;
4210                 dev_offset = map->stripes[i].physical;
4211
4212                 btrfs_set_stack_stripe_devid(stripe, device->devid);
4213                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4214                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4215                 stripe++;
4216         }
4217
4218         btrfs_set_stack_chunk_length(chunk, chunk_size);
4219         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4220         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4221         btrfs_set_stack_chunk_type(chunk, map->type);
4222         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4223         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4224         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4225         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4226         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4227
4228         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4229         key.type = BTRFS_CHUNK_ITEM_KEY;
4230         key.offset = chunk_offset;
4231
4232         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4233         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4234                 /*
4235                  * TODO: Cleanup of inserted chunk root in case of
4236                  * failure.
4237                  */
4238                 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4239                                              item_size);
4240         }
4241
4242 out:
4243         kfree(chunk);
4244         free_extent_map(em);
4245         return ret;
4246 }
4247
4248 /*
4249  * Chunk allocation falls into two parts. The first part does the work
4250  * that makes the newly allocated chunk usable, but does not do any
4251  * operation that modifies the chunk tree. The second part does the work
4252  * that requires modifying the chunk tree. This division is important for the
4253  * bootstrap process of adding storage to a seed btrfs.
4254  */
4255 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4256                       struct btrfs_root *extent_root, u64 type)
4257 {
4258         u64 chunk_offset;
4259
4260         chunk_offset = find_next_chunk(extent_root->fs_info);
4261         return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4262 }
4263
4264 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4265                                          struct btrfs_root *root,
4266                                          struct btrfs_device *device)
4267 {
4268         u64 chunk_offset;
4269         u64 sys_chunk_offset;
4270         u64 alloc_profile;
4271         struct btrfs_fs_info *fs_info = root->fs_info;
4272         struct btrfs_root *extent_root = fs_info->extent_root;
4273         int ret;
4274
4275         chunk_offset = find_next_chunk(fs_info);
4276         alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4277         ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4278                                   alloc_profile);
4279         if (ret)
4280                 return ret;
4281
4282         sys_chunk_offset = find_next_chunk(root->fs_info);
4283         alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4284         ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4285                                   alloc_profile);
4286         if (ret) {
4287                 btrfs_abort_transaction(trans, root, ret);
4288                 goto out;
4289         }
4290
4291         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4292         if (ret)
4293                 btrfs_abort_transaction(trans, root, ret);
4294 out:
4295         return ret;
4296 }
4297
4298 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4299 {
4300         struct extent_map *em;
4301         struct map_lookup *map;
4302         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4303         int readonly = 0;
4304         int i;
4305
4306         read_lock(&map_tree->map_tree.lock);
4307         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4308         read_unlock(&map_tree->map_tree.lock);
4309         if (!em)
4310                 return 1;
4311
4312         if (btrfs_test_opt(root, DEGRADED)) {
4313                 free_extent_map(em);
4314                 return 0;
4315         }
4316
4317         map = (struct map_lookup *)em->bdev;
4318         for (i = 0; i < map->num_stripes; i++) {
4319                 if (!map->stripes[i].dev->writeable) {
4320                         readonly = 1;
4321                         break;
4322                 }
4323         }
4324         free_extent_map(em);
4325         return readonly;
4326 }
4327
4328 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4329 {
4330         extent_map_tree_init(&tree->map_tree);
4331 }
4332
4333 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4334 {
4335         struct extent_map *em;
4336
4337         while (1) {
4338                 write_lock(&tree->map_tree.lock);
4339                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4340                 if (em)
4341                         remove_extent_mapping(&tree->map_tree, em);
4342                 write_unlock(&tree->map_tree.lock);
4343                 if (!em)
4344                         break;
4345                 kfree(em->bdev);
4346                 /* once for us */
4347                 free_extent_map(em);
4348                 /* once for the tree */
4349                 free_extent_map(em);
4350         }
4351 }
4352
4353 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4354 {
4355         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4356         struct extent_map *em;
4357         struct map_lookup *map;
4358         struct extent_map_tree *em_tree = &map_tree->map_tree;
4359         int ret;
4360
4361         read_lock(&em_tree->lock);
4362         em = lookup_extent_mapping(em_tree, logical, len);
4363         read_unlock(&em_tree->lock);
4364
4365         /*
4366           * We could return errors for these cases, but that could get ugly, and
4367           * we'd probably end up doing the same thing anyway: exit without doing
4368           * anything else. So return 1 so the callers don't try to use other copies.
4369          */
4370         if (!em) {
4371                 btrfs_crit(fs_info, "No mapping for %Lu-%Lu\n", logical,
4372                             logical+len);
4373                 return 1;
4374         }
4375
4376         if (em->start > logical || em->start + em->len < logical) {
4377                 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4378                             "%Lu-%Lu\n", logical, logical+len, em->start,
4379                             em->start + em->len);
                /* don't leak the mapping we looked up above */
                free_extent_map(em);
4380                 return 1;
4381         }
4382
4383         map = (struct map_lookup *)em->bdev;
4384         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4385                 ret = map->num_stripes;
4386         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4387                 ret = map->sub_stripes;
4388         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4389                 ret = 2;
4390         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4391                 ret = 3;
4392         else
4393                 ret = 1;
4394         free_extent_map(em);
4395
4396         btrfs_dev_replace_lock(&fs_info->dev_replace);
4397         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4398                 ret++;
4399         btrfs_dev_replace_unlock(&fs_info->dev_replace);
4400
4401         return ret;
4402 }
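/*
 * Example with illustrative numbers: a RAID10 chunk with num_stripes = 4
 * and sub_stripes = 2 yields 2 copies; RAID6 yields 3 (the data plus
 * reconstruction via P or via Q).  While a dev-replace is ongoing, each
 * result is bumped by one because the target drive temporarily acts as
 * an extra mirror.
 */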
4403
4404 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4405                                     struct btrfs_mapping_tree *map_tree,
4406                                     u64 logical)
4407 {
4408         struct extent_map *em;
4409         struct map_lookup *map;
4410         struct extent_map_tree *em_tree = &map_tree->map_tree;
4411         unsigned long len = root->sectorsize;
4412
4413         read_lock(&em_tree->lock);
4414         em = lookup_extent_mapping(em_tree, logical, len);
4415         read_unlock(&em_tree->lock);
4416         BUG_ON(!em);
4417
4418         BUG_ON(em->start > logical || em->start + em->len < logical);
4419         map = (struct map_lookup *)em->bdev;
4420         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4421                          BTRFS_BLOCK_GROUP_RAID6)) {
4422                 len = map->stripe_len * nr_data_stripes(map);
4423         }
4424         free_extent_map(em);
4425         return len;
4426 }
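/*
 * Illustrative numbers: RAID6 on 6 devices has nr_data_stripes = 4, so
 * with a 64K stripe_len this returns a 256K full stripe; for all other
 * profiles the sectorsize default above is returned unchanged.
 */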
4427
4428 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4429                            u64 logical, u64 len, int mirror_num)
4430 {
4431         struct extent_map *em;
4432         struct map_lookup *map;
4433         struct extent_map_tree *em_tree = &map_tree->map_tree;
4434         int ret = 0;
4435
4436         read_lock(&em_tree->lock);
4437         em = lookup_extent_mapping(em_tree, logical, len);
4438         read_unlock(&em_tree->lock);
4439         BUG_ON(!em);
4440
4441         BUG_ON(em->start > logical || em->start + em->len < logical);
4442         map = (struct map_lookup *)em->bdev;
4443         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4444                          BTRFS_BLOCK_GROUP_RAID6))
4445                 ret = 1;
4446         free_extent_map(em);
4447         return ret;
4448 }
4449
4450 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4451                             struct map_lookup *map, int first, int num,
4452                             int optimal, int dev_replace_is_ongoing)
4453 {
4454         int i;
4455         int tolerance;
4456         struct btrfs_device *srcdev;
4457
4458         if (dev_replace_is_ongoing &&
4459             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4460              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4461                 srcdev = fs_info->dev_replace.srcdev;
4462         else
4463                 srcdev = NULL;
4464
4465         /*
4466          * try to avoid the drive that is the source drive for a
4467          * dev-replace procedure, only choose it if no other non-missing
4468          * mirror is available
4469          */
4470         for (tolerance = 0; tolerance < 2; tolerance++) {
4471                 if (map->stripes[optimal].dev->bdev &&
4472                     (tolerance || map->stripes[optimal].dev != srcdev))
4473                         return optimal;
4474                 for (i = first; i < first + num; i++) {
4475                         if (map->stripes[i].dev->bdev &&
4476                             (tolerance || map->stripes[i].dev != srcdev))
4477                                 return i;
4478                 }
4479         }
4480
4481         /* We couldn't find a mirror that doesn't fail.  Just return something
4482          * and the I/O error handling code will clean up eventually.
4483          */
4484         return optimal;
4485 }
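/*
 * Example (illustrative): for a RAID1 read the caller below passes
 * optimal = current->pid % map->num_stripes, spreading concurrent
 * readers across the copies.  The tolerance == 0 pass skips the
 * dev-replace source drive; the tolerance == 1 pass accepts it rather
 * than failing the read outright.
 */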
4486
4487 static inline int parity_smaller(u64 a, u64 b)
4488 {
4489         return a > b;
4490 }
4491
4492 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4493 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4494 {
4495         struct btrfs_bio_stripe s;
4496         int i;
4497         u64 l;
4498         int again = 1;
4499
4500         while (again) {
4501                 again = 0;
4502                 for (i = 0; i < bbio->num_stripes - 1; i++) {
4503                         if (parity_smaller(raid_map[i], raid_map[i+1])) {
4504                                 s = bbio->stripes[i];
4505                                 l = raid_map[i];
4506                                 bbio->stripes[i] = bbio->stripes[i+1];
4507                                 raid_map[i] = raid_map[i+1];
4508                                 bbio->stripes[i+1] = s;
4509                                 raid_map[i+1] = l;
4510                                 again = 1;
4511                         }
4512                 }
4513         }
4514 }
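/*
 * Worked example (illustrative): a 3-device RAID5 set may arrive as
 * raid_map = { RAID5_P_STRIPE, start, start + 64K }.  The parity
 * sentinels are huge values ((u64)-2 for P, (u64)-1 for Q), so
 * parity_smaller() bubbles them rightwards, leaving
 * { start, start + 64K, RAID5_P_STRIPE }: data stripes in logical
 * order, parity/syndrome last.
 */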
4515
4516 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4517                              u64 logical, u64 *length,
4518                              struct btrfs_bio **bbio_ret,
4519                              int mirror_num, u64 **raid_map_ret)
4520 {
4521         struct extent_map *em;
4522         struct map_lookup *map;
4523         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4524         struct extent_map_tree *em_tree = &map_tree->map_tree;
4525         u64 offset;
4526         u64 stripe_offset;
4527         u64 stripe_end_offset;
4528         u64 stripe_nr;
4529         u64 stripe_nr_orig;
4530         u64 stripe_nr_end;
4531         u64 stripe_len;
4532         u64 *raid_map = NULL;
4533         int stripe_index;
4534         int i;
4535         int ret = 0;
4536         int num_stripes;
4537         int max_errors = 0;
4538         struct btrfs_bio *bbio = NULL;
4539         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4540         int dev_replace_is_ongoing = 0;
4541         int num_alloc_stripes;
4542         int patch_the_first_stripe_for_dev_replace = 0;
4543         u64 physical_to_patch_in_first_stripe = 0;
4544         u64 raid56_full_stripe_start = (u64)-1;
4545
4546         read_lock(&em_tree->lock);
4547         em = lookup_extent_mapping(em_tree, logical, *length);
4548         read_unlock(&em_tree->lock);
4549
4550         if (!em) {
4551                 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4552                         (unsigned long long)logical,
4553                         (unsigned long long)*length);
4554                 return -EINVAL;
4555         }
4556
4557         if (em->start > logical || em->start + em->len < logical) {
4558                 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4559                            "found %Lu-%Lu\n", logical, em->start,
4560                            em->start + em->len);
                /* don't leak the mapping we looked up above */
                free_extent_map(em);
4561                 return -EINVAL;
4562         }
4563
4564         map = (struct map_lookup *)em->bdev;
4565         offset = logical - em->start;
4566
4567         stripe_len = map->stripe_len;
4568         stripe_nr = offset;
4569         /*
4570          * stripe_nr counts the total number of stripes we have to stride
4571          * to get to this block
4572          */
4573         do_div(stripe_nr, stripe_len);
4574
4575         stripe_offset = stripe_nr * stripe_len;
4576         BUG_ON(offset < stripe_offset);
4577
4578         /* stripe_offset is the offset of this block in its stripe */
4579         stripe_offset = offset - stripe_offset;
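        /*
         * Illustrative numbers: with stripe_len = 64K and offset = 150K,
         * the do_div() above leaves stripe_nr = 2, so stripe_offset
         * becomes 150K - 2 * 64K = 22K into that stripe.
         */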
4580
4581         /* if we're here for raid56, we need to know the stripe-aligned start */
4582         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4583                 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4584                 raid56_full_stripe_start = offset;
4585
4586                 /* allow a write of a full stripe, but make sure we don't
4587                  * allow straddling of stripes
4588                  */
4589                 do_div(raid56_full_stripe_start, full_stripe_len);
4590                 raid56_full_stripe_start *= full_stripe_len;
4591         }
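        /*
         * Illustrative numbers: RAID5 on 3 devices has nr_data_stripes = 2,
         * so with stripe_len = 64K the full stripe spans 128K and an offset
         * of 200K rounds down to raid56_full_stripe_start = 128K.
         */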
4592
4593         if (rw & REQ_DISCARD) {
4594                 /* we don't discard raid56 yet */
4595                 if (map->type &
4596                     (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4597                         ret = -EOPNOTSUPP;
4598                         goto out;
4599                 }
4600                 *length = min_t(u64, em->len - offset, *length);
4601         } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4602                 u64 max_len;
4603                 /* For writes to RAID[56], allow a full stripe set across all disks.
4604                    For other RAID types and for RAID[56] reads, just allow a single
4605                    stripe (on a single disk). */
4606                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4607                     (rw & REQ_WRITE)) {
4608                         max_len = stripe_len * nr_data_stripes(map) -
4609                                 (offset - raid56_full_stripe_start);
4610                 } else {
4611                         /* we limit the length of each bio to what fits in a stripe */
4612                         max_len = stripe_len - stripe_offset;
4613                 }
4614                 *length = min_t(u64, em->len - offset, max_len);
4615         } else {
4616                 *length = em->len - offset;
4617         }
4618
4619         /* This is for when we're called from btrfs_merge_bio_hook() and all
4620            it cares about is the length */
4621         if (!bbio_ret)
4622                 goto out;
4623
4624         btrfs_dev_replace_lock(dev_replace);
4625         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4626         if (!dev_replace_is_ongoing)
4627                 btrfs_dev_replace_unlock(dev_replace);
4628
4629         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4630             !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4631             dev_replace->tgtdev != NULL) {
4632                 /*
4633                  * In the dev-replace case, for the repair case (that's the only
4634                  * case where the mirror is selected explicitly when
4635                  * calling btrfs_map_block), blocks left of the left cursor
4636                  * can also be read from the target drive.
4637                  * For REQ_GET_READ_MIRRORS, the target drive is added as
4638                  * the last one to the array of stripes. For READ, it also
4639                  * needs to be supported using the same mirror number.
4640                  * If the requested block is not left of the left cursor,
4641                  * EIO is returned. This can happen because btrfs_num_copies()
4642                  * returns one more in the dev-replace case.
4643                  */
4644                 u64 tmp_length = *length;
4645                 struct btrfs_bio *tmp_bbio = NULL;
4646                 int tmp_num_stripes;
4647                 u64 srcdev_devid = dev_replace->srcdev->devid;
4648                 int index_srcdev = 0;
4649                 int found = 0;
4650                 u64 physical_of_found = 0;
4651
4652                 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4653                              logical, &tmp_length, &tmp_bbio, 0, NULL);
4654                 if (ret) {
4655                         WARN_ON(tmp_bbio != NULL);
4656                         goto out;
4657                 }
4658
4659                 tmp_num_stripes = tmp_bbio->num_stripes;
4660                 if (mirror_num > tmp_num_stripes) {
4661                         /*
4662                          * REQ_GET_READ_MIRRORS does not contain this
4663                          * mirror, that means that the requested area
4664                          * is not left of the left cursor
4665                          */
4666                         ret = -EIO;
4667                         kfree(tmp_bbio);
4668                         goto out;
4669                 }
4670
4671                 /*
4672                  * Process the rest of the function using the mirror_num
4673                  * of the source drive. Therefore look it up first.
4674                  * At the end, patch the device pointer to that of the
4675                  * target drive.
4676                  */
4677                 for (i = 0; i < tmp_num_stripes; i++) {
4678                         if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4679                                 /*
4680                                  * In case of DUP, in order to keep it
4681                                  * simple, only add the mirror with the
4682                                  * lowest physical address
4683                                  */
4684                                 if (found &&
4685                                     physical_of_found <=
4686                                      tmp_bbio->stripes[i].physical)
4687                                         continue;
4688                                 index_srcdev = i;
4689                                 found = 1;
4690                                 physical_of_found =
4691                                         tmp_bbio->stripes[i].physical;
4692                         }
4693                 }
4694
4695                 if (found) {
4696                         mirror_num = index_srcdev + 1;
4697                         patch_the_first_stripe_for_dev_replace = 1;
4698                         physical_to_patch_in_first_stripe = physical_of_found;
4699                 } else {
4700                         WARN_ON(1);
4701                         ret = -EIO;
4702                         kfree(tmp_bbio);
4703                         goto out;
4704                 }
4705
4706                 kfree(tmp_bbio);
4707         } else if (mirror_num > map->num_stripes) {
4708                 mirror_num = 0;
4709         }
4710
4711         num_stripes = 1;
4712         stripe_index = 0;
4713         stripe_nr_orig = stripe_nr;
4714         stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4715         do_div(stripe_nr_end, map->stripe_len);
4716         stripe_end_offset = stripe_nr_end * map->stripe_len -
4717                             (offset + *length);
4718
4719         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4720                 if (rw & REQ_DISCARD)
4721                         num_stripes = min_t(u64, map->num_stripes,
4722                                             stripe_nr_end - stripe_nr_orig);
4723                 stripe_index = do_div(stripe_nr, map->num_stripes);
4724         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4725                 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4726                         num_stripes = map->num_stripes;
4727                 else if (mirror_num)
4728                         stripe_index = mirror_num - 1;
4729                 else {
4730                         stripe_index = find_live_mirror(fs_info, map, 0,
4731                                             map->num_stripes,
4732                                             current->pid % map->num_stripes,
4733                                             dev_replace_is_ongoing);
4734                         mirror_num = stripe_index + 1;
4735                 }
4736
4737         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4738                 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4739                         num_stripes = map->num_stripes;
4740                 } else if (mirror_num) {
4741                         stripe_index = mirror_num - 1;
4742                 } else {
4743                         mirror_num = 1;
4744                 }
4745
4746         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4747                 int factor = map->num_stripes / map->sub_stripes;
4748
4749                 stripe_index = do_div(stripe_nr, factor);
4750                 stripe_index *= map->sub_stripes;
4751
4752                 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4753                         num_stripes = map->sub_stripes;
4754                 else if (rw & REQ_DISCARD)
4755                         num_stripes = min_t(u64, map->sub_stripes *
4756                                             (stripe_nr_end - stripe_nr_orig),
4757                                             map->num_stripes);
4758                 else if (mirror_num)
4759                         stripe_index += mirror_num - 1;
4760                 else {
4761                         int old_stripe_index = stripe_index;
4762                         stripe_index = find_live_mirror(fs_info, map,
4763                                               stripe_index,
4764                                               map->sub_stripes, stripe_index +
4765                                               current->pid % map->sub_stripes,
4766                                               dev_replace_is_ongoing);
4767                         mirror_num = stripe_index - old_stripe_index + 1;
4768                 }
4769
4770         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4771                                 BTRFS_BLOCK_GROUP_RAID6)) {
4772                 u64 tmp;
4773
4774                 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4775                     && raid_map_ret) {
4776                         int i, rot;
4777
4778                         /* push stripe_nr back to the start of the full stripe */
4779                         stripe_nr = raid56_full_stripe_start;
4780                         do_div(stripe_nr, stripe_len);
4781
4782                         stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4783
4784                         /* RAID[56] write or recovery. Return all stripes */
4785                         num_stripes = map->num_stripes;
4786                         max_errors = nr_parity_stripes(map);
4787
4788                         raid_map = kmalloc(sizeof(u64) * num_stripes,
4789                                            GFP_NOFS);
4790                         if (!raid_map) {
4791                                 ret = -ENOMEM;
4792                                 goto out;
4793                         }
4794
4795                         /* Work out the disk rotation on this stripe-set */
4796                         tmp = stripe_nr;
4797                         rot = do_div(tmp, num_stripes);
4798
4799                         /* Fill in the logical address of each stripe */
4800                         tmp = stripe_nr * nr_data_stripes(map);
4801                         for (i = 0; i < nr_data_stripes(map); i++)
4802                                 raid_map[(i+rot) % num_stripes] =
4803                                         em->start + (tmp + i) * map->stripe_len;
4804
4805                         raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4806                         if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4807                                 raid_map[(i+rot+1) % num_stripes] =
4808                                         RAID6_Q_STRIPE;
4809
4810                         *length = map->stripe_len;
4811                         stripe_index = 0;
4812                         stripe_offset = 0;
4813                 } else {
4814                         /*
4815                          * Mirror #0 or #1 means the original data block.
4816                  * Mirror #2 is the RAID5 parity block.
4817                  * Mirror #3 is the RAID6 Q block.
4818                          */
4819                         stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4820                         if (mirror_num > 1)
4821                                 stripe_index = nr_data_stripes(map) +
4822                                                 mirror_num - 2;
4823
4824                         /* We distribute the parity blocks across stripes */
4825                         tmp = stripe_nr + stripe_index;
4826                         stripe_index = do_div(tmp, map->num_stripes);
4827                 }
4828         } else {
4829                 /*
4830                  * after this do_div call, stripe_nr is the number of stripes
4831                  * on this device we have to walk to find the data, and
4832                  * stripe_index is the index of our device in the stripe array
4833                  */
4834                 stripe_index = do_div(stripe_nr, map->num_stripes);
4835                 mirror_num = stripe_index + 1;
4836         }
4837         BUG_ON(stripe_index >= map->num_stripes);
4838
4839         num_alloc_stripes = num_stripes;
4840         if (dev_replace_is_ongoing) {
4841                 if (rw & (REQ_WRITE | REQ_DISCARD))
4842                         num_alloc_stripes <<= 1;
4843                 if (rw & REQ_GET_READ_MIRRORS)
4844                         num_alloc_stripes++;
4845         }
4846         bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4847         if (!bbio) {
4848                 kfree(raid_map);
4849                 ret = -ENOMEM;
4850                 goto out;
4851         }
4852         atomic_set(&bbio->error, 0);
4853
4854         if (rw & REQ_DISCARD) {
4855                 int factor = 0;
4856                 int sub_stripes = 0;
4857                 u64 stripes_per_dev = 0;
4858                 u32 remaining_stripes = 0;
4859                 u32 last_stripe = 0;
4860
4861                 if (map->type &
4862                     (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4863                         if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4864                                 sub_stripes = 1;
4865                         else
4866                                 sub_stripes = map->sub_stripes;
4867
4868                         factor = map->num_stripes / sub_stripes;
4869                         stripes_per_dev = div_u64_rem(stripe_nr_end -
4870                                                       stripe_nr_orig,
4871                                                       factor,
4872                                                       &remaining_stripes);
4873                         div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4874                         last_stripe *= sub_stripes;
4875                 }
4876
4877                 for (i = 0; i < num_stripes; i++) {
4878                         bbio->stripes[i].physical =
4879                                 map->stripes[stripe_index].physical +
4880                                 stripe_offset + stripe_nr * map->stripe_len;
4881                         bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4882
4883                         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4884                                          BTRFS_BLOCK_GROUP_RAID10)) {
4885                                 bbio->stripes[i].length = stripes_per_dev *
4886                                                           map->stripe_len;
4887
4888                                 if (i / sub_stripes < remaining_stripes)
4889                                         bbio->stripes[i].length +=
4890                                                 map->stripe_len;
4891
4892                                 /*
4893                                  * Special for the first stripe and
4894                                  * the last stripe:
4895                                  *
4896                                  * |-------|...|-------|
4897                                  *     |----------|
4898                                  *    off     end_off
4899                                  */
4900                                 if (i < sub_stripes)
4901                                         bbio->stripes[i].length -=
4902                                                 stripe_offset;
4903
4904                                 if (stripe_index >= last_stripe &&
4905                                     stripe_index <= (last_stripe +
4906                                                      sub_stripes - 1))
4907                                         bbio->stripes[i].length -=
4908                                                 stripe_end_offset;
4909
4910                                 if (i == sub_stripes - 1)
4911                                         stripe_offset = 0;
4912                         } else
4913                                 bbio->stripes[i].length = *length;
4914
4915                         stripe_index++;
4916                         if (stripe_index == map->num_stripes) {
4917                                 /* This could only happen for RAID0/10 */
4918                                 stripe_index = 0;
4919                                 stripe_nr++;
4920                         }
4921                 }
4922         } else {
4923                 for (i = 0; i < num_stripes; i++) {
4924                         bbio->stripes[i].physical =
4925                                 map->stripes[stripe_index].physical +
4926                                 stripe_offset +
4927                                 stripe_nr * map->stripe_len;
4928                         bbio->stripes[i].dev =
4929                                 map->stripes[stripe_index].dev;
4930                         stripe_index++;
4931                 }
4932         }
4933
4934         if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4935                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4936                                  BTRFS_BLOCK_GROUP_RAID10 |
4937                                  BTRFS_BLOCK_GROUP_RAID5 |
4938                                  BTRFS_BLOCK_GROUP_DUP)) {
4939                         max_errors = 1;
4940                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4941                         max_errors = 2;
4942                 }
4943         }
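        /*
         * Illustrative consequence: a RAID6 write can lose up to two
         * stripes (max_errors = 2) and still succeed, the mirrored and
         * single-parity profiles can lose one, RAID0/single none.
         */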
4944
4945         if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4946             dev_replace->tgtdev != NULL) {
4947                 int index_where_to_add;
4948                 u64 srcdev_devid = dev_replace->srcdev->devid;
4949
4950                 /*
4951                  * Duplicate the write operations while the dev-replace
4952                  * procedure is running. Since the copying of the old disk
4953                  * to the new disk takes place at run time while the
4954                  * filesystem is mounted writable, the regular write
4955                  * operations to the old disk have to be duplicated to go
4956                  * to the new disk as well.
4957                  * Note that device->missing is handled by the caller, and
4958                  * that the write to the old disk is already set up in the
4959                  * stripes array.
4960                  */
4961                 index_where_to_add = num_stripes;
4962                 for (i = 0; i < num_stripes; i++) {
4963                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
4964                                 /* write to new disk, too */
4965                                 struct btrfs_bio_stripe *new =
4966                                         bbio->stripes + index_where_to_add;
4967                                 struct btrfs_bio_stripe *old =
4968                                         bbio->stripes + i;
4969
4970                                 new->physical = old->physical;
4971                                 new->length = old->length;
4972                                 new->dev = dev_replace->tgtdev;
4973                                 index_where_to_add++;
4974                                 max_errors++;
4975                         }
4976                 }
4977                 num_stripes = index_where_to_add;
4978         } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4979                    dev_replace->tgtdev != NULL) {
4980                 u64 srcdev_devid = dev_replace->srcdev->devid;
4981                 int index_srcdev = 0;
4982                 int found = 0;
4983                 u64 physical_of_found = 0;
4984
4985                 /*
4986                  * During the dev-replace procedure, the target drive can
4987                  * also be used to read data in case it is needed to repair
4988                  * a corrupt block elsewhere. This is possible if the
4989                  * requested area is left of the left cursor. In this area,
4990                  * the target drive is a full copy of the source drive.
4991                  */
4992                 for (i = 0; i < num_stripes; i++) {
4993                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
4994                                 /*
4995                                  * In case of DUP, in order to keep it
4996                                  * simple, only add the mirror with the
4997                                  * lowest physical address
4998                                  */
4999                                 if (found &&
5000                                     physical_of_found <=
5001                                      bbio->stripes[i].physical)
5002                                         continue;
5003                                 index_srcdev = i;
5004                                 found = 1;
5005                                 physical_of_found = bbio->stripes[i].physical;
5006                         }
5007                 }
5008                 if (found) {
5009                         u64 length = map->stripe_len;
5010
5011                         if (physical_of_found + length <=
5012                             dev_replace->cursor_left) {
5013                                 struct btrfs_bio_stripe *tgtdev_stripe =
5014                                         bbio->stripes + num_stripes;
5015
5016                                 tgtdev_stripe->physical = physical_of_found;
5017                                 tgtdev_stripe->length =
5018                                         bbio->stripes[index_srcdev].length;
5019                                 tgtdev_stripe->dev = dev_replace->tgtdev;
5020
5021                                 num_stripes++;
5022                         }
5023                 }
5024         }
5025
5026         *bbio_ret = bbio;
5027         bbio->num_stripes = num_stripes;
5028         bbio->max_errors = max_errors;
5029         bbio->mirror_num = mirror_num;
5030
5031         /*
5032          * This is the case where REQ_READ && dev_replace_is_ongoing &&
5033          * mirror_num == num_stripes + 1 && dev_replace target drive is
5034          * available as a mirror
5035          */
5036         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5037                 WARN_ON(num_stripes > 1);
5038                 bbio->stripes[0].dev = dev_replace->tgtdev;
5039                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5040                 bbio->mirror_num = map->num_stripes + 1;
5041         }
5042         if (raid_map) {
5043                 sort_parity_stripes(bbio, raid_map);
5044                 *raid_map_ret = raid_map;
5045         }
5046 out:
5047         if (dev_replace_is_ongoing)
5048                 btrfs_dev_replace_unlock(dev_replace);
5049         free_extent_map(em);
5050         return ret;
5051 }
5052
5053 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5054                       u64 logical, u64 *length,
5055                       struct btrfs_bio **bbio_ret, int mirror_num)
5056 {
5057         return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5058                                  mirror_num, NULL);
5059 }
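/*
 * Usage sketch (illustrative; 'use_stripe' is a hypothetical helper and
 * error handling is trimmed).  A caller maps a logical range and walks
 * the returned stripes, looping because map_length may be clamped to a
 * single stripe:
 *
 *	u64 map_length = length;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, READ, logical, &map_length,
 *			      &bbio, 0);
 *	if (!ret) {
 *		for (i = 0; i < bbio->num_stripes; i++)
 *			use_stripe(bbio->stripes[i].dev,
 *				   bbio->stripes[i].physical);
 *		kfree(bbio);
 *	}
 */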
5060
5061 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5062                      u64 chunk_start, u64 physical, u64 devid,
5063                      u64 **logical, int *naddrs, int *stripe_len)
5064 {
5065         struct extent_map_tree *em_tree = &map_tree->map_tree;
5066         struct extent_map *em;
5067         struct map_lookup *map;
5068         u64 *buf;
5069         u64 bytenr;
5070         u64 length;
5071         u64 stripe_nr;
5072         u64 rmap_len;
5073         int i, j, nr = 0;
5074
5075         read_lock(&em_tree->lock);
5076         em = lookup_extent_mapping(em_tree, chunk_start, 1);
5077         read_unlock(&em_tree->lock);
5078
5079         if (!em) {
5080                 printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
5081                        chunk_start);
5082                 return -EIO;
5083         }
5084
5085         if (em->start != chunk_start) {
5086                 printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
5087                        em->start, chunk_start);
5088                 free_extent_map(em);
5089                 return -EIO;
5090         }
5091         map = (struct map_lookup *)em->bdev;
5092
5093         length = em->len;
5094         rmap_len = map->stripe_len;
5095
5096         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5097                 do_div(length, map->num_stripes / map->sub_stripes);
5098         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5099                 do_div(length, map->num_stripes);
5100         else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5101                               BTRFS_BLOCK_GROUP_RAID6)) {
5102                 do_div(length, nr_data_stripes(map));
5103                 rmap_len = map->stripe_len * nr_data_stripes(map);
5104         }
5105
5106         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
5107         BUG_ON(!buf); /* -ENOMEM */
5108
5109         for (i = 0; i < map->num_stripes; i++) {
5110                 if (devid && map->stripes[i].dev->devid != devid)
5111                         continue;
5112                 if (map->stripes[i].physical > physical ||
5113                     map->stripes[i].physical + length <= physical)
5114                         continue;
5115
5116                 stripe_nr = physical - map->stripes[i].physical;
5117                 do_div(stripe_nr, map->stripe_len);
5118
5119                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5120                         stripe_nr = stripe_nr * map->num_stripes + i;
5121                         do_div(stripe_nr, map->sub_stripes);
5122                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5123                         stripe_nr = stripe_nr * map->num_stripes + i;
5124                 } /* else if RAID[56], multiply by nr_data_stripes().
5125                    * Alternatively, just use rmap_len below instead of
5126                    * map->stripe_len */
5127
5128                 bytenr = chunk_start + stripe_nr * rmap_len;
5129                 WARN_ON(nr >= map->num_stripes);
5130                 for (j = 0; j < nr; j++) {
5131                         if (buf[j] == bytenr)
5132                                 break;
5133                 }
5134                 if (j == nr) {
5135                         WARN_ON(nr >= map->num_stripes);
5136                         buf[nr++] = bytenr;
5137                 }
5138         }
5139
5140         *logical = buf;
5141         *naddrs = nr;
5142         *stripe_len = rmap_len;
5143
5144         free_extent_map(em);
5145         return 0;
5146 }
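/*
 * Illustrative numbers: for a RAID0 chunk of em->len = 1G over two
 * devices, each device carries length = 512M of it and a physical
 * address resolves to exactly one logical bytenr.  On RAID1, where
 * every stripe covers the whole chunk, the duplicate check above keeps
 * buf[] from listing the same bytenr twice.
 */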
5147
5148 static void btrfs_end_bio(struct bio *bio, int err)
5149 {
5150         struct btrfs_bio *bbio = bio->bi_private;
5151         int is_orig_bio = 0;
5152
5153         if (err) {
5154                 atomic_inc(&bbio->error);
5155                 if (err == -EIO || err == -EREMOTEIO) {
5156                         unsigned int stripe_index =
5157                                 btrfs_io_bio(bio)->stripe_index;
5158                         struct btrfs_device *dev;
5159
5160                         BUG_ON(stripe_index >= bbio->num_stripes);
5161                         dev = bbio->stripes[stripe_index].dev;
5162                         if (dev->bdev) {
5163                                 if (bio->bi_rw & WRITE)
5164                                         btrfs_dev_stat_inc(dev,
5165                                                 BTRFS_DEV_STAT_WRITE_ERRS);
5166                                 else
5167                                         btrfs_dev_stat_inc(dev,
5168                                                 BTRFS_DEV_STAT_READ_ERRS);
5169                                 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5170                                         btrfs_dev_stat_inc(dev,
5171                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
5172                                 btrfs_dev_stat_print_on_error(dev);
5173                         }
5174                 }
5175         }
5176
5177         if (bio == bbio->orig_bio)
5178                 is_orig_bio = 1;
5179
5180         if (atomic_dec_and_test(&bbio->stripes_pending)) {
5181                 if (!is_orig_bio) {
5182                         bio_put(bio);
5183                         bio = bbio->orig_bio;
5184                 }
5185                 bio->bi_private = bbio->private;
5186                 bio->bi_end_io = bbio->end_io;
5187                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5188                 /* only send an error to the higher layers if it is
5189                  * beyond the tolerance of the btrfs bio
5190                  */
5191                 if (atomic_read(&bbio->error) > bbio->max_errors) {
5192                         err = -EIO;
5193                 } else {
5194                         /*
5195                          * this bio is actually up to date, we didn't
5196                          * go over the max number of errors
5197                          */
5198                         set_bit(BIO_UPTODATE, &bio->bi_flags);
5199                         err = 0;
5200                 }
5201                 kfree(bbio);
5202
5203                 bio_endio(bio, err);
5204         } else if (!is_orig_bio) {
5205                 bio_put(bio);
5206         }
5207 }
5208
5209 struct async_sched {
5210         struct bio *bio;
5211         int rw;
5212         struct btrfs_fs_info *info;
5213         struct btrfs_work work;
5214 };
5215
5216 /*
5217  * see run_scheduled_bios for a description of why bios are collected for
5218  * async submit.
5219  *
5220  * This will add one bio to the pending list for a device and make sure
5221  * the work struct is scheduled.
5222  */
5223 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5224                                         struct btrfs_device *device,
5225                                         int rw, struct bio *bio)
5226 {
5227         int should_queue = 1;
5228         struct btrfs_pending_bios *pending_bios;
5229
5230         if (device->missing || !device->bdev) {
5231                 bio_endio(bio, -EIO);
5232                 return;
5233         }
5234
5235         /* don't bother with additional async steps for reads, right now */
5236         if (!(rw & REQ_WRITE)) {
5237                 bio_get(bio);
5238                 btrfsic_submit_bio(rw, bio);
5239                 bio_put(bio);
5240                 return;
5241         }
5242
5243         /*
5244          * nr_async_bios allows us to reliably return congestion to the
5245          * higher layers.  Otherwise, the async bio makes it appear we have
5246          * made progress against dirty pages when we've really just put it
5247          * on a queue for later
5248          */
5249         atomic_inc(&root->fs_info->nr_async_bios);
5250         WARN_ON(bio->bi_next);
5251         bio->bi_next = NULL;
5252         bio->bi_rw |= rw;
5253
5254         spin_lock(&device->io_lock);
5255         if (bio->bi_rw & REQ_SYNC)
5256                 pending_bios = &device->pending_sync_bios;
5257         else
5258                 pending_bios = &device->pending_bios;
5259
5260         if (pending_bios->tail)
5261                 pending_bios->tail->bi_next = bio;
5262
5263         pending_bios->tail = bio;
5264         if (!pending_bios->head)
5265                 pending_bios->head = bio;
5266         if (device->running_pending)
5267                 should_queue = 0;
5268
5269         spin_unlock(&device->io_lock);
5270
5271         if (should_queue)
5272                 btrfs_queue_worker(&root->fs_info->submit_workers,
5273                                    &device->work);
5274 }
5275
5276 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5277                        sector_t sector)
5278 {
5279         struct bio_vec *prev;
5280         struct request_queue *q = bdev_get_queue(bdev);
5281         unsigned short max_sectors = queue_max_sectors(q);
5282         struct bvec_merge_data bvm = {
5283                 .bi_bdev = bdev,
5284                 .bi_sector = sector,
5285                 .bi_rw = bio->bi_rw,
5286         };
5287
5288         if (bio->bi_vcnt == 0) {
5289                 WARN_ON(1);
5290                 return 1;
5291         }
5292
5293         prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5294         if (bio_sectors(bio) > max_sectors)
5295                 return 0;
5296
5297         if (!q->merge_bvec_fn)
5298                 return 1;
5299
5300         bvm.bi_size = bio->bi_size - prev->bv_len;
5301         if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5302                 return 0;
5303         return 1;
5304 }
5305
5306 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5307                               struct bio *bio, u64 physical, int dev_nr,
5308                               int rw, int async)
5309 {
5310         struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5311
5312         bio->bi_private = bbio;
5313         btrfs_io_bio(bio)->stripe_index = dev_nr;
5314         bio->bi_end_io = btrfs_end_bio;
5315         bio->bi_sector = physical >> 9;
5316 #ifdef DEBUG
5317         {
5318                 struct rcu_string *name;
5319
5320                 rcu_read_lock();
5321                 name = rcu_dereference(dev->name);
5322                 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5323                          "(%s id %llu), size=%u\n", rw,
5324                          (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5325                          name->str, dev->devid, bio->bi_size);
5326                 rcu_read_unlock();
5327         }
5328 #endif
5329         bio->bi_bdev = dev->bdev;
5330         if (async)
5331                 btrfs_schedule_bio(root, dev, rw, bio);
5332         else
5333                 btrfsic_submit_bio(rw, bio);
5334 }
5335
5336 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5337                               struct bio *first_bio, struct btrfs_device *dev,
5338                               int dev_nr, int rw, int async)
5339 {
5340         struct bio_vec *bvec = first_bio->bi_io_vec;
5341         struct bio *bio;
5342         int nr_vecs = bio_get_nr_vecs(dev->bdev);
5343         u64 physical = bbio->stripes[dev_nr].physical;
5344
5345 again:
5346         bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5347         if (!bio)
5348                 return -ENOMEM;
5349
5350         while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5351                 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5352                                  bvec->bv_offset) < bvec->bv_len) {
5353                         u64 len = bio->bi_size;
5354
5355                         atomic_inc(&bbio->stripes_pending);
5356                         submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5357                                           rw, async);
5358                         physical += len;
5359                         goto again;
5360                 }
5361                 bvec++;
5362         }
5363
5364         submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5365         return 0;
5366 }
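/*
 * Illustrative flow: if the device accepts at most 512K per bio and
 * first_bio carries 1M, the loop above fills and submits a 512K clone,
 * advances 'physical' by the bytes already queued, and restarts at the
 * 'again' label with a fresh bio for the remainder.
 */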
5367
5368 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5369 {
5370         atomic_inc(&bbio->error);
5371         if (atomic_dec_and_test(&bbio->stripes_pending)) {
5372                 bio->bi_private = bbio->private;
5373                 bio->bi_end_io = bbio->end_io;
5374                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5375                 bio->bi_sector = logical >> 9;
5376                 kfree(bbio);
5377                 bio_endio(bio, -EIO);
5378         }
5379 }
5380
5381 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5382                   int mirror_num, int async_submit)
5383 {
5384         struct btrfs_device *dev;
5385         struct bio *first_bio = bio;
5386         u64 logical = (u64)bio->bi_sector << 9;
5387         u64 length = 0;
5388         u64 map_length;
5389         u64 *raid_map = NULL;
5390         int ret;
5391         int dev_nr = 0;
5392         int total_devs = 1;
5393         struct btrfs_bio *bbio = NULL;
5394
5395         length = bio->bi_size;
5396         map_length = length;
5397
5398         ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5399                               mirror_num, &raid_map);
5400         if (ret) /* -ENOMEM */
5401                 return ret;
5402
5403         total_devs = bbio->num_stripes;
5404         bbio->orig_bio = first_bio;
5405         bbio->private = first_bio->bi_private;
5406         bbio->end_io = first_bio->bi_end_io;
5407         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5408
5409         if (raid_map) {
5410                 /* In this case, map_length has been set to the length of
5411                    a single stripe, not the whole write */
5412                 if (rw & WRITE) {
5413                         return raid56_parity_write(root, bio, bbio,
5414                                                    raid_map, map_length);
5415                 } else {
5416                         return raid56_parity_recover(root, bio, bbio,
5417                                                      raid_map, map_length,
5418                                                      mirror_num);
5419                 }
5420         }
5421
5422         if (map_length < length) {
5423                 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5424                         (unsigned long long)logical,
5425                         (unsigned long long)length,
5426                         (unsigned long long)map_length);
5427                 BUG();
5428         }
5429
5430         while (dev_nr < total_devs) {
5431                 dev = bbio->stripes[dev_nr].dev;
5432                 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5433                         bbio_error(bbio, first_bio, logical);
5434                         dev_nr++;
5435                         continue;
5436                 }
5437
5438                 /*
5439                  * Check and see if we're ok with this bio based on its size
5440                  * and offset with the given device.
5441                  */
5442                 if (!bio_size_ok(dev->bdev, first_bio,
5443                                  bbio->stripes[dev_nr].physical >> 9)) {
5444                         ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5445                                                  dev_nr, rw, async_submit);
5446                         BUG_ON(ret);
5447                         dev_nr++;
5448                         continue;
5449                 }
5450
5451                 if (dev_nr < total_devs - 1) {
5452                         bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5453                         BUG_ON(!bio); /* -ENOMEM */
5454                 } else {
5455                         bio = first_bio;
5456                 }
5457
5458                 submit_stripe_bio(root, bbio, bio,
5459                                   bbio->stripes[dev_nr].physical, dev_nr, rw,
5460                                   async_submit);
5461                 dev_nr++;
5462         }
5463         return 0;
5464 }
5465
5466 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5467                                        u8 *uuid, u8 *fsid)
5468 {
5469         struct btrfs_device *device;
5470         struct btrfs_fs_devices *cur_devices;
5471
5472         cur_devices = fs_info->fs_devices;
5473         while (cur_devices) {
5474                 if (!fsid ||
5475                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5476                         device = __find_device(&cur_devices->devices,
5477                                                devid, uuid);
5478                         if (device)
5479                                 return device;
5480                 }
5481                 cur_devices = cur_devices->seed;
5482         }
5483         return NULL;
5484 }
5485
5486 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5487                                             u64 devid, u8 *dev_uuid)
5488 {
5489         struct btrfs_device *device;
5490         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5491
5492         device = kzalloc(sizeof(*device), GFP_NOFS);
5493         if (!device)
5494                 return NULL;
5495         list_add(&device->dev_list,
5496                  &fs_devices->devices);
5497         device->devid = devid;
5498         device->work.func = pending_bios_fn;
5499         device->fs_devices = fs_devices;
5500         device->missing = 1;
5501         fs_devices->num_devices++;
5502         fs_devices->missing_devices++;
5503         spin_lock_init(&device->io_lock);
5504         INIT_LIST_HEAD(&device->dev_alloc_list);
5505         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
5506         return device;
5507 }
5508
5509 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5510                           struct extent_buffer *leaf,
5511                           struct btrfs_chunk *chunk)
5512 {
5513         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5514         struct map_lookup *map;
5515         struct extent_map *em;
5516         u64 logical;
5517         u64 length;
5518         u64 devid;
5519         u8 uuid[BTRFS_UUID_SIZE];
5520         int num_stripes;
5521         int ret;
5522         int i;
5523
5524         logical = key->offset;
5525         length = btrfs_chunk_length(leaf, chunk);
5526
5527         read_lock(&map_tree->map_tree.lock);
5528         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5529         read_unlock(&map_tree->map_tree.lock);
5530
5531         /* already mapped? */
5532         if (em && em->start <= logical && em->start + em->len > logical) {
5533                 free_extent_map(em);
5534                 return 0;
5535         } else if (em) {
5536                 free_extent_map(em);
5537         }
5538
5539         em = alloc_extent_map();
5540         if (!em)
5541                 return -ENOMEM;
5542         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5543         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5544         if (!map) {
5545                 free_extent_map(em);
5546                 return -ENOMEM;
5547         }
5548
5549         em->bdev = (struct block_device *)map;
5550         em->start = logical;
5551         em->len = length;
5552         em->orig_start = 0;
5553         em->block_start = 0;
5554         em->block_len = em->len;
5555
5556         map->num_stripes = num_stripes;
5557         map->io_width = btrfs_chunk_io_width(leaf, chunk);
5558         map->io_align = btrfs_chunk_io_align(leaf, chunk);
5559         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5560         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5561         map->type = btrfs_chunk_type(leaf, chunk);
5562         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5563         for (i = 0; i < num_stripes; i++) {
5564                 map->stripes[i].physical =
5565                         btrfs_stripe_offset_nr(leaf, chunk, i);
5566                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5567                 read_extent_buffer(leaf, uuid, (unsigned long)
5568                                    btrfs_stripe_dev_uuid_nr(chunk, i),
5569                                    BTRFS_UUID_SIZE);
5570                 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5571                                                         uuid, NULL);
5572                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5573                         kfree(map);
5574                         free_extent_map(em);
5575                         return -EIO;
5576                 }
5577                 if (!map->stripes[i].dev) {
5578                         map->stripes[i].dev =
5579                                 add_missing_dev(root, devid, uuid);
5580                         if (!map->stripes[i].dev) {
5581                                 kfree(map);
5582                                 free_extent_map(em);
5583                                 return -EIO;
5584                         }
5585                 }
5586                 map->stripes[i].dev->in_fs_metadata = 1;
5587         }
5588
5589         write_lock(&map_tree->map_tree.lock);
5590         ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5591         write_unlock(&map_tree->map_tree.lock);
5592         BUG_ON(ret); /* Tree corruption */
5593         free_extent_map(em);
5594
5595         return 0;
5596 }
5597
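/*
 * Copy the on-disk fields of a device item into the in-memory
 * btrfs_device, including the device uuid.
 */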
5598 static void fill_device_from_item(struct extent_buffer *leaf,
5599                                  struct btrfs_dev_item *dev_item,
5600                                  struct btrfs_device *device)
5601 {
5602         unsigned long ptr;
5603
5604         device->devid = btrfs_device_id(leaf, dev_item);
5605         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5606         device->total_bytes = device->disk_total_bytes;
5607         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5608         device->type = btrfs_device_type(leaf, dev_item);
5609         device->io_align = btrfs_device_io_align(leaf, dev_item);
5610         device->io_width = btrfs_device_io_width(leaf, dev_item);
5611         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5612         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5613         device->is_tgtdev_for_dev_replace = 0;
5614
5615         ptr = (unsigned long)btrfs_device_uuid(dev_item);
5616         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5617 }
5618
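/*
 * Make the seed filesystem identified by @fsid available.  If it is
 * already chained on the current fs_devices->seed list there is nothing
 * to do; otherwise the scanned fs_devices is cloned, its devices are
 * opened read-only and the clone is linked at the head of the seed
 * list.
 */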
5619 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5620 {
5621         struct btrfs_fs_devices *fs_devices;
5622         int ret;
5623
5624         BUG_ON(!mutex_is_locked(&uuid_mutex));
5625
5626         fs_devices = root->fs_info->fs_devices->seed;
5627         while (fs_devices) {
5628                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5629                         ret = 0;
5630                         goto out;
5631                 }
5632                 fs_devices = fs_devices->seed;
5633         }
5634
5635         fs_devices = find_fsid(fsid);
5636         if (!fs_devices) {
5637                 ret = -ENOENT;
5638                 goto out;
5639         }
5640
5641         fs_devices = clone_fs_devices(fs_devices);
5642         if (IS_ERR(fs_devices)) {
5643                 ret = PTR_ERR(fs_devices);
5644                 goto out;
5645         }
5646
5647         ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5648                                    root->fs_info->bdev_holder);
5649         if (ret) {
5650                 free_fs_devices(fs_devices);
5651                 goto out;
5652         }
5653
5654         if (!fs_devices->seeding) {
5655                 __btrfs_close_devices(fs_devices);
5656                 free_fs_devices(fs_devices);
5657                 ret = -EINVAL;
5658                 goto out;
5659         }
5660
5661         fs_devices->seed = root->fs_info->fs_devices->seed;
5662         root->fs_info->fs_devices->seed = fs_devices;
5663 out:
5664         return ret;
5665 }
5666
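/*
 * Process one device item from the chunk tree: open the owning seed
 * filesystem if the item carries a foreign fsid, find (or, when mounted
 * degraded, fabricate) the matching in-memory device, then refresh its
 * fields and the free-space accounting from the item.
 */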
5667 static int read_one_dev(struct btrfs_root *root,
5668                         struct extent_buffer *leaf,
5669                         struct btrfs_dev_item *dev_item)
5670 {
5671         struct btrfs_device *device;
5672         u64 devid;
5673         int ret;
5674         u8 fs_uuid[BTRFS_UUID_SIZE];
5675         u8 dev_uuid[BTRFS_UUID_SIZE];
5676
5677         devid = btrfs_device_id(leaf, dev_item);
5678         read_extent_buffer(leaf, dev_uuid,
5679                            (unsigned long)btrfs_device_uuid(dev_item),
5680                            BTRFS_UUID_SIZE);
5681         read_extent_buffer(leaf, fs_uuid,
5682                            (unsigned long)btrfs_device_fsid(dev_item),
5683                            BTRFS_UUID_SIZE);
5684
5685         if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5686                 ret = open_seed_devices(root, fs_uuid);
5687                 if (ret && !btrfs_test_opt(root, DEGRADED))
5688                         return ret;
5689         }
5690
5691         device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5692         if (!device || !device->bdev) {
5693                 if (!btrfs_test_opt(root, DEGRADED))
5694                         return -EIO;
5695
5696                 if (!device) {
5697                         btrfs_warn(root->fs_info, "devid %llu missing",
5698                                 (unsigned long long)devid);
5699                         device = add_missing_dev(root, devid, dev_uuid);
5700                         if (!device)
5701                                 return -ENOMEM;
5702                 } else if (!device->missing) {
5703                         /*
5704                          * This happens when a device that was properly set
5705                          * up in the device info lists suddenly goes bad.
5706                          * device->bdev is NULL, so we have to set
5707                          * device->missing to 1 here.
5708                          */
5709                         root->fs_info->fs_devices->missing_devices++;
5710                         device->missing = 1;
5711                 }
5712         }
5713
5714         if (device->fs_devices != root->fs_info->fs_devices) {
5715                 BUG_ON(device->writeable);
5716                 if (device->generation !=
5717                     btrfs_device_generation(leaf, dev_item))
5718                         return -EINVAL;
5719         }
5720
5721         fill_device_from_item(leaf, dev_item, device);
5722         device->in_fs_metadata = 1;
5723         if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5724                 device->fs_devices->total_rw_bytes += device->total_bytes;
5725                 spin_lock(&root->fs_info->free_chunk_lock);
5726                 root->fs_info->free_chunk_space += device->total_bytes -
5727                         device->bytes_used;
5728                 spin_unlock(&root->fs_info->free_chunk_lock);
5729         }
5730         ret = 0;
5731         return ret;
5732 }
5733
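/*
 * Parse the superblock's sys_chunk_array.  It duplicates the SYSTEM
 * chunk items so that the chunk tree itself can be located before any
 * chunk mappings exist; each entry is handed to read_one_chunk().
 */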
5734 int btrfs_read_sys_array(struct btrfs_root *root)
5735 {
5736         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5737         struct extent_buffer *sb;
5738         struct btrfs_disk_key *disk_key;
5739         struct btrfs_chunk *chunk;
5740         u8 *ptr;
5741         unsigned long sb_ptr;
5742         int ret = 0;
5743         u32 num_stripes;
5744         u32 array_size;
5745         u32 len = 0;
5746         u32 cur;
5747         struct btrfs_key key;
5748
5749         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5750                                           BTRFS_SUPER_INFO_SIZE);
5751         if (!sb)
5752                 return -ENOMEM;
5753         btrfs_set_buffer_uptodate(sb);
5754         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5755         /*
5756          * The sb extent buffer is artificial and is only used to read the system array.
5757          * The btrfs_set_buffer_uptodate() call does not properly mark all of its
5758          * pages up-to-date when the page is larger: the extent does not cover the
5759          * whole page and consequently check_page_uptodate does not find all
5760          * the page's extents up-to-date (the hole beyond sb), so
5761          * write_extent_buffer then triggers a WARN_ON.
5762          *
5763          * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle,
5764          * but sb spans only this function.  Add an explicit SetPageUptodate call
5765          * to silence the warning, e.g. on PowerPC 64.
5766          */
5767         if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5768                 SetPageUptodate(sb->pages[0]);
5769
5770         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5771         array_size = btrfs_super_sys_array_size(super_copy);
5772
5773         ptr = super_copy->sys_chunk_array;
5774         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5775         cur = 0;
5776
5777         while (cur < array_size) {
5778                 disk_key = (struct btrfs_disk_key *)ptr;
5779                 btrfs_disk_key_to_cpu(&key, disk_key);
5780
5781                 len = sizeof(*disk_key);
                     ptr += len;
5782                 sb_ptr += len;
5783                 cur += len;
5784
5785                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5786                         chunk = (struct btrfs_chunk *)sb_ptr;
5787                         ret = read_one_chunk(root, &key, sb, chunk);
5788                         if (ret)
5789                                 break;
5790                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5791                         len = btrfs_chunk_item_size(num_stripes);
5792                 } else {
5793                         ret = -EIO;
5794                         break;
5795                 }
5796                 ptr += len;
5797                 sb_ptr += len;
5798                 cur += len;
5799         }
5800         free_extent_buffer(sb);
5801         return ret;
5802 }
5803
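/*
 * Read the entire chunk tree to populate the device list and the chunk
 * mapping tree.  Device items sort before chunk items (see the comment
 * on the search key below), so every device is known by the time its
 * stripes are referenced.
 */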
5804 int btrfs_read_chunk_tree(struct btrfs_root *root)
5805 {
5806         struct btrfs_path *path;
5807         struct extent_buffer *leaf;
5808         struct btrfs_key key;
5809         struct btrfs_key found_key;
5810         int ret;
5811         int slot;
5812
5813         root = root->fs_info->chunk_root;
5814
5815         path = btrfs_alloc_path();
5816         if (!path)
5817                 return -ENOMEM;
5818
5819         mutex_lock(&uuid_mutex);
5820         lock_chunks(root);
5821
5822         /*
5823          * Read all device items, and then all the chunk items. All
5824          * device items are found before any chunk item (their object id
5825          * is smaller than the lowest possible object id for a chunk
5826          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
5827          */
5828         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5829         key.offset = 0;
5830         key.type = 0;
5831         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5832         if (ret < 0)
5833                 goto error;
5834         while (1) {
5835                 leaf = path->nodes[0];
5836                 slot = path->slots[0];
5837                 if (slot >= btrfs_header_nritems(leaf)) {
5838                         ret = btrfs_next_leaf(root, path);
5839                         if (ret == 0)
5840                                 continue;
5841                         if (ret < 0)
5842                                 goto error;
5843                         break;
5844                 }
5845                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5846                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5847                         struct btrfs_dev_item *dev_item;
5848                         dev_item = btrfs_item_ptr(leaf, slot,
5849                                                   struct btrfs_dev_item);
5850                         ret = read_one_dev(root, leaf, dev_item);
5851                         if (ret)
5852                                 goto error;
5853                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5854                         struct btrfs_chunk *chunk;
5855                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5856                         ret = read_one_chunk(root, &found_key, leaf, chunk);
5857                         if (ret)
5858                                 goto error;
5859                 }
5860                 path->slots[0]++;
5861         }
5862         ret = 0;
5863 error:
5864         unlock_chunks(root);
5865         mutex_unlock(&uuid_mutex);
5866
5867         btrfs_free_path(path);
5868         return ret;
5869 }
5870
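/*
 * Point every known device at the dev tree root.  This runs late in the
 * mount sequence, once fs_info->dev_root has been read.
 */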
5871 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
5872 {
5873         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5874         struct btrfs_device *device;
5875
5876         mutex_lock(&fs_devices->device_list_mutex);
5877         list_for_each_entry(device, &fs_devices->devices, dev_list)
5878                 device->dev_root = fs_info->dev_root;
5879         mutex_unlock(&fs_devices->device_list_mutex);
5880 }
5881
5882 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5883 {
5884         int i;
5885
5886         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5887                 btrfs_dev_stat_reset(dev, i);
5888 }
5889
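/*
 * Load the persistent I/O error statistics of every device from the
 * device tree.  A device without a dev_stats item starts out with all
 * counters reset.
 */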
5890 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5891 {
5892         struct btrfs_key key;
5893         struct btrfs_key found_key;
5894         struct btrfs_root *dev_root = fs_info->dev_root;
5895         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5896         struct extent_buffer *eb;
5897         int slot;
5898         int ret = 0;
5899         struct btrfs_device *device;
5900         struct btrfs_path *path = NULL;
5901         int i;
5902
5903         path = btrfs_alloc_path();
5904         if (!path) {
5905                 ret = -ENOMEM;
5906                 goto out;
5907         }
5908
5909         mutex_lock(&fs_devices->device_list_mutex);
5910         list_for_each_entry(device, &fs_devices->devices, dev_list) {
5911                 int item_size;
5912                 struct btrfs_dev_stats_item *ptr;
5913
5914                 key.objectid = 0;
5915                 key.type = BTRFS_DEV_STATS_KEY;
5916                 key.offset = device->devid;
5917                 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5918                 if (ret) {
5919                         __btrfs_reset_dev_stats(device);
5920                         device->dev_stats_valid = 1;
5921                         btrfs_release_path(path);
5922                         continue;
5923                 }
5924                 slot = path->slots[0];
5925                 eb = path->nodes[0];
5926                 btrfs_item_key_to_cpu(eb, &found_key, slot);
5927                 item_size = btrfs_item_size_nr(eb, slot);
5928
5929                 ptr = btrfs_item_ptr(eb, slot,
5930                                      struct btrfs_dev_stats_item);
5931
5932                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5933                         if (item_size >= (1 + i) * sizeof(__le64))
5934                                 btrfs_dev_stat_set(device, i,
5935                                         btrfs_dev_stats_value(eb, ptr, i));
5936                         else
5937                                 btrfs_dev_stat_reset(device, i);
5938                 }
5939
5940                 device->dev_stats_valid = 1;
5941                 btrfs_dev_stat_print_on_load(device);
5942                 btrfs_release_path(path);
5943         }
5944         mutex_unlock(&fs_devices->device_list_mutex);
5945
5946 out:
5947         btrfs_free_path(path);
5948         return ret < 0 ? ret : 0;
5949 }
5950
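/*
 * Write the in-memory error counters of one device back to its
 * dev_stats item, deleting a stale, too-small item and inserting a
 * fresh one when necessary.
 */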
5951 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5952                                 struct btrfs_root *dev_root,
5953                                 struct btrfs_device *device)
5954 {
5955         struct btrfs_path *path;
5956         struct btrfs_key key;
5957         struct extent_buffer *eb;
5958         struct btrfs_dev_stats_item *ptr;
5959         int ret;
5960         int i;
5961
5962         key.objectid = 0;
5963         key.type = BTRFS_DEV_STATS_KEY;
5964         key.offset = device->devid;
5965
5966         path = btrfs_alloc_path();
5967         if (!path)
                 return -ENOMEM;
5968         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5969         if (ret < 0) {
5970                 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5971                               ret, rcu_str_deref(device->name));
5972                 goto out;
5973         }
5974
5975         if (ret == 0 &&
5976             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5977                 /* need to delete old one and insert a new one */
5978                 ret = btrfs_del_item(trans, dev_root, path);
5979                 if (ret != 0) {
5980                         printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
5981                                       rcu_str_deref(device->name), ret);
5982                         goto out;
5983                 }
5984                 ret = 1;
5985         }
5986
5987         if (ret == 1) {
5988                 /* need to insert a new item */
5989                 btrfs_release_path(path);
5990                 ret = btrfs_insert_empty_item(trans, dev_root, path,
5991                                               &key, sizeof(*ptr));
5992                 if (ret < 0) {
5993                         printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5994                                       rcu_str_deref(device->name), ret);
5995                         goto out;
5996                 }
5997         }
5998
5999         eb = path->nodes[0];
6000         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6001         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6002                 btrfs_set_dev_stats_value(eb, ptr, i,
6003                                           btrfs_dev_stat_read(device, i));
6004         btrfs_mark_buffer_dirty(eb);
6005
6006 out:
6007         btrfs_free_path(path);
6008         return ret;
6009 }
6010
6011 /*
6012  * Called from commit_transaction().  Writes all changed device stats to disk.
6013  */
6014 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6015                         struct btrfs_fs_info *fs_info)
6016 {
6017         struct btrfs_root *dev_root = fs_info->dev_root;
6018         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6019         struct btrfs_device *device;
6020         int ret = 0;
6021
6022         mutex_lock(&fs_devices->device_list_mutex);
6023         list_for_each_entry(device, &fs_devices->devices, dev_list) {
6024                 if (!device->dev_stats_valid || !device->dev_stats_dirty)
6025                         continue;
6026
6027                 ret = update_dev_stat_item(trans, dev_root, device);
6028                 if (!ret)
6029                         device->dev_stats_dirty = 0;
6030         }
6031         mutex_unlock(&fs_devices->device_list_mutex);
6032
6033         return ret;
6034 }
6035
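/* Bump one error counter and emit the ratelimited error summary. */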
6036 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6037 {
6038         btrfs_dev_stat_inc(dev, index);
6039         btrfs_dev_stat_print_on_error(dev);
6040 }
6041
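/* Ratelimited dump of all error counters; silent until the stats have
 * been loaded and are valid. */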
6042 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6043 {
6044         if (!dev->dev_stats_valid)
6045                 return;
6046         printk_ratelimited_in_rcu(KERN_ERR
6047                            "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6048                            rcu_str_deref(dev->name),
6049                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6050                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6051                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6052                            btrfs_dev_stat_read(dev,
6053                                                BTRFS_DEV_STAT_CORRUPTION_ERRS),
6054                            btrfs_dev_stat_read(dev,
6055                                                BTRFS_DEV_STAT_GENERATION_ERRS));
6056 }
6057
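/*
 * Print the error summary when the stats are loaded at mount time, but
 * stay silent if all counters are zero.
 */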
6058 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6059 {
6060         int i;
6061
6062         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6063                 if (btrfs_dev_stat_read(dev, i) != 0)
6064                         break;
6065         if (i == BTRFS_DEV_STAT_VALUES_MAX)
6066                 return; /* all values == 0, suppress message */
6067
6068         printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6069                rcu_str_deref(dev->name),
6070                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6071                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6072                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6073                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6074                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6075 }
6076
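/*
 * Back end of the BTRFS_IOC_GET_DEV_STATS ioctl: copy the counters of
 * the requested device into @stats, optionally resetting them when
 * BTRFS_DEV_STATS_RESET is set in the flags.
 */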
6077 int btrfs_get_dev_stats(struct btrfs_root *root,
6078                         struct btrfs_ioctl_get_dev_stats *stats)
6079 {
6080         struct btrfs_device *dev;
6081         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6082         int i;
6083
6084         mutex_lock(&fs_devices->device_list_mutex);
6085         dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6086         mutex_unlock(&fs_devices->device_list_mutex);
6087
6088         if (!dev) {
6089                 printk(KERN_WARNING
6090                        "btrfs: get dev_stats failed, device not found\n");
6091                 return -ENODEV;
6092         } else if (!dev->dev_stats_valid) {
6093                 printk(KERN_WARNING
6094                        "btrfs: get dev_stats failed, not yet valid\n");
6095                 return -ENODEV;
6096         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6097                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6098                         if (stats->nr_items > i)
6099                                 stats->values[i] =
6100                                         btrfs_dev_stat_read_and_reset(dev, i);
6101                         else
6102                                 btrfs_dev_stat_reset(dev, i);
6103                 }
6104         } else {
6105                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6106                         if (stats->nr_items > i)
6107                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
6108         }
6109         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6110                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6111         return 0;
6112 }
6113
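/*
 * Zero the magic in the device's superblock so the device is no longer
 * recognized as belonging to a btrfs filesystem.
 */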
6114 int btrfs_scratch_superblock(struct btrfs_device *device)
6115 {
6116         struct buffer_head *bh;
6117         struct btrfs_super_block *disk_super;
6118
6119         bh = btrfs_read_dev_super(device->bdev);
6120         if (!bh)
6121                 return -EINVAL;
6122         disk_super = (struct btrfs_super_block *)bh->b_data;
6123
6124         memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6125         set_buffer_dirty(bh);
6126         sync_dirty_buffer(bh);
6127         brelse(bh);
6128
6129         return 0;
6130 }